2024-12-04 06:50:28,169 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-12-04 06:50:28,181 main DEBUG Took 0.009821 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-04 06:50:28,181 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-04 06:50:28,181 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-04 06:50:28,182 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-04 06:50:28,183 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 06:50:28,190 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-04 06:50:28,201 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 06:50:28,202 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 06:50:28,202 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 06:50:28,203 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 06:50:28,203 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 06:50:28,203 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 06:50:28,204 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 06:50:28,204 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 06:50:28,205 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 06:50:28,205 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 06:50:28,206 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 06:50:28,206 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 06:50:28,206 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 06:50:28,207 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-04 06:50:28,207 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 06:50:28,207 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 06:50:28,208 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 06:50:28,208 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 06:50:28,208 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 06:50:28,209 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 06:50:28,209 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 06:50:28,209 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 06:50:28,210 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 06:50:28,210 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 06:50:28,210 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 06:50:28,211 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-04 06:50:28,212 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 06:50:28,213 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-04 06:50:28,215 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-04 06:50:28,215 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-04 06:50:28,216 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-04 06:50:28,216 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-04 06:50:28,224 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-04 06:50:28,227 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-04 06:50:28,228 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-04 06:50:28,229 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-04 06:50:28,229 main DEBUG createAppenders(={Console}) 2024-12-04 06:50:28,230 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-12-04 06:50:28,230 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-12-04 06:50:28,230 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-12-04 06:50:28,231 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-04 06:50:28,231 main DEBUG OutputStream closed 2024-12-04 06:50:28,231 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-04 06:50:28,231 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-04 06:50:28,232 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-12-04 06:50:28,297 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-04 06:50:28,298 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-04 06:50:28,299 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-04 06:50:28,300 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-04 06:50:28,301 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-04 06:50:28,301 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-04 06:50:28,301 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-04 06:50:28,302 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-04 06:50:28,302 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-04 06:50:28,302 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-04 06:50:28,302 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-04 06:50:28,303 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-04 06:50:28,303 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-04 06:50:28,303 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-04 06:50:28,303 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-04 06:50:28,304 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-04 06:50:28,304 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-04 06:50:28,305 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-04 06:50:28,307 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-04 06:50:28,307 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-12-04 06:50:28,308 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-04 06:50:28,308 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-12-04T06:50:28,562 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd 2024-12-04 06:50:28,565 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-04 06:50:28,566 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
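For reference, the Log4j DEBUG lines above show the configuration being rebuilt from the log4j2.properties bundled in hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar: an HBaseTestAppender named Console writing to SYSTEM_ERR with a 1G cap, a PatternLayout, a root logger at INFO routed to Console, and per-package logger levels. A minimal sketch of what that properties file plausibly contains, reconstructed from those lines (the abbreviated property keys and the HBaseTestAppender element name are assumptions; the logger names, levels, pattern, target and maxSize come straight from the log):

appender.console.type = HBaseTestAppender
appender.console.name = Console
appender.console.target = SYSTEM_ERR
appender.console.maxSize = 1G
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n

rootLogger = INFO,Console

# Representative logger entries; the remaining packages built above follow the same pattern.
logger.hadoop.name = org.apache.hadoop
logger.hadoop.level = WARN
logger.hbase.name = org.apache.hadoop.hbase
logger.hbase.level = DEBUG
logger.zookeeper.name = org.apache.zookeeper
logger.zookeeper.level = ERROR
logger.directory.name = org.apache.directory
logger.directory.level = WARN
logger.directory.additivity = false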
2024-12-04T06:50:28,575 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-12-04T06:50:28,613 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=150, ProcessCount=11, AvailableMemoryMB=6988 2024-12-04T06:50:28,616 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-04T06:50:28,635 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/cluster_3cc6c746-eb6c-4595-67e3-3ec4389ad478, deleteOnExit=true 2024-12-04T06:50:28,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-04T06:50:28,637 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/test.cache.data in system properties and HBase conf 2024-12-04T06:50:28,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/hadoop.tmp.dir in system properties and HBase conf 2024-12-04T06:50:28,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/hadoop.log.dir in system properties and HBase conf 2024-12-04T06:50:28,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-04T06:50:28,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-04T06:50:28,641 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-04T06:50:28,744 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-04T06:50:28,839 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-04T06:50:28,843 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-04T06:50:28,844 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-04T06:50:28,844 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-04T06:50:28,844 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T06:50:28,845 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-04T06:50:28,845 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-04T06:50:28,846 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T06:50:28,846 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T06:50:28,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-04T06:50:28,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/nfs.dump.dir in system properties and HBase conf 2024-12-04T06:50:28,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/java.io.tmpdir in system properties and HBase conf 2024-12-04T06:50:28,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T06:50:28,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-04T06:50:28,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-04T06:50:29,327 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-04T06:50:29,673 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-04T06:50:29,763 INFO [Time-limited test {}] log.Log(170): Logging initialized @2287ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-04T06:50:29,848 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T06:50:29,910 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T06:50:29,929 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T06:50:29,930 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T06:50:29,931 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T06:50:29,944 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T06:50:29,946 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/hadoop.log.dir/,AVAILABLE} 2024-12-04T06:50:29,947 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T06:50:30,142 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/java.io.tmpdir/jetty-localhost-37223-hadoop-hdfs-3_4_1-tests_jar-_-any-2920169026110107690/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-04T06:50:30,152 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:37223} 2024-12-04T06:50:30,152 INFO [Time-limited test {}] server.Server(415): Started @2677ms 2024-12-04T06:50:30,182 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-04T06:50:30,541 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T06:50:30,549 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T06:50:30,552 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T06:50:30,552 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T06:50:30,553 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-04T06:50:30,554 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/hadoop.log.dir/,AVAILABLE} 2024-12-04T06:50:30,555 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T06:50:30,684 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/java.io.tmpdir/jetty-localhost-36257-hadoop-hdfs-3_4_1-tests_jar-_-any-2285696794030577685/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:50:30,684 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:36257} 2024-12-04T06:50:30,685 INFO [Time-limited test {}] server.Server(415): Started @3209ms 2024-12-04T06:50:30,739 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-04T06:50:30,860 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T06:50:30,865 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T06:50:30,877 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T06:50:30,877 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T06:50:30,877 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-04T06:50:30,878 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/hadoop.log.dir/,AVAILABLE} 2024-12-04T06:50:30,879 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T06:50:31,028 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/java.io.tmpdir/jetty-localhost-33217-hadoop-hdfs-3_4_1-tests_jar-_-any-5300486601807498452/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:50:31,029 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:33217} 2024-12-04T06:50:31,029 INFO [Time-limited test {}] server.Server(415): Started @3554ms 2024-12-04T06:50:31,031 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
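At this point the minicluster requested at 06:50:28,616 (StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1, ...}) has its HDFS web endpoints up. As a hedged sketch, not a quote of the actual test source, this is roughly how a test such as TestLogRolling drives that startup through HBaseTestingUtil, assuming the builder methods mirror the option fields printed in the log (numMasters, numRegionServers, numDataNodes, numZkServers):

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterStartupSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Mirrors the logged option: 1 master, 1 region server, 2 datanodes, 1 ZK server.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);   // brings up DFS, ZooKeeper, the HMaster and the region server
    try {
      // ... exercise WAL rolling against the running cluster ...
    } finally {
      util.shutdownMiniCluster();    // tears the whole minicluster back down
    }
  }
}

The remainder of the log below is that same startup continuing: ZooKeeper on client port 56093, the master registering as 607fd5c6574c,40555,1733295032348, and the local master:store region being created.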
2024-12-04T06:50:31,186 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/cluster_3cc6c746-eb6c-4595-67e3-3ec4389ad478/data/data1/current/BP-14357224-172.17.0.2-1733295029427/current, will proceed with Du for space computation calculation, 2024-12-04T06:50:31,186 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/cluster_3cc6c746-eb6c-4595-67e3-3ec4389ad478/data/data2/current/BP-14357224-172.17.0.2-1733295029427/current, will proceed with Du for space computation calculation, 2024-12-04T06:50:31,186 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/cluster_3cc6c746-eb6c-4595-67e3-3ec4389ad478/data/data3/current/BP-14357224-172.17.0.2-1733295029427/current, will proceed with Du for space computation calculation, 2024-12-04T06:50:31,188 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/cluster_3cc6c746-eb6c-4595-67e3-3ec4389ad478/data/data4/current/BP-14357224-172.17.0.2-1733295029427/current, will proceed with Du for space computation calculation, 2024-12-04T06:50:31,247 WARN [Thread-82 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-04T06:50:31,247 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-04T06:50:31,318 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xec490abd624573b2 with lease ID 0x2315ad1b373b5fd2: Processing first storage report for DS-056361a8-3721-4704-ab3a-3447e456f9d5 from datanode DatanodeRegistration(127.0.0.1:33339, datanodeUuid=38d40b95-bb2f-494b-bfbc-f254c2127156, infoPort=46651, infoSecurePort=0, ipcPort=45391, storageInfo=lv=-57;cid=testClusterID;nsid=1371502633;c=1733295029427) 2024-12-04T06:50:31,319 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xec490abd624573b2 with lease ID 0x2315ad1b373b5fd2: from storage DS-056361a8-3721-4704-ab3a-3447e456f9d5 node DatanodeRegistration(127.0.0.1:33339, datanodeUuid=38d40b95-bb2f-494b-bfbc-f254c2127156, infoPort=46651, infoSecurePort=0, ipcPort=45391, storageInfo=lv=-57;cid=testClusterID;nsid=1371502633;c=1733295029427), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-04T06:50:31,320 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd3a0279c2f951b7d with lease ID 0x2315ad1b373b5fd1: Processing first storage report for DS-52869328-cce3-44d0-b0a1-5d2b389e2659 from datanode DatanodeRegistration(127.0.0.1:36347, datanodeUuid=ba6926ec-cfd9-48eb-b711-8df620d49d27, infoPort=40691, infoSecurePort=0, ipcPort=32831, storageInfo=lv=-57;cid=testClusterID;nsid=1371502633;c=1733295029427) 2024-12-04T06:50:31,320 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd3a0279c2f951b7d with lease ID 0x2315ad1b373b5fd1: from storage DS-52869328-cce3-44d0-b0a1-5d2b389e2659 node DatanodeRegistration(127.0.0.1:36347, datanodeUuid=ba6926ec-cfd9-48eb-b711-8df620d49d27, infoPort=40691, infoSecurePort=0, ipcPort=32831, storageInfo=lv=-57;cid=testClusterID;nsid=1371502633;c=1733295029427), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:50:31,320 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xec490abd624573b2 with lease ID 0x2315ad1b373b5fd2: Processing first storage report for DS-178bf604-a9d2-4bac-ac7a-21adf6c59ef9 from datanode DatanodeRegistration(127.0.0.1:33339, datanodeUuid=38d40b95-bb2f-494b-bfbc-f254c2127156, infoPort=46651, infoSecurePort=0, ipcPort=45391, storageInfo=lv=-57;cid=testClusterID;nsid=1371502633;c=1733295029427) 2024-12-04T06:50:31,321 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xec490abd624573b2 with lease ID 0x2315ad1b373b5fd2: from storage DS-178bf604-a9d2-4bac-ac7a-21adf6c59ef9 node DatanodeRegistration(127.0.0.1:33339, datanodeUuid=38d40b95-bb2f-494b-bfbc-f254c2127156, infoPort=46651, infoSecurePort=0, ipcPort=45391, storageInfo=lv=-57;cid=testClusterID;nsid=1371502633;c=1733295029427), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-04T06:50:31,321 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd3a0279c2f951b7d with lease ID 0x2315ad1b373b5fd1: Processing first storage report for DS-cc278306-14b2-4a51-b1da-e9ae5cf71214 from datanode DatanodeRegistration(127.0.0.1:36347, datanodeUuid=ba6926ec-cfd9-48eb-b711-8df620d49d27, infoPort=40691, infoSecurePort=0, ipcPort=32831, storageInfo=lv=-57;cid=testClusterID;nsid=1371502633;c=1733295029427) 2024-12-04T06:50:31,321 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xd3a0279c2f951b7d with lease ID 0x2315ad1b373b5fd1: from storage DS-cc278306-14b2-4a51-b1da-e9ae5cf71214 node DatanodeRegistration(127.0.0.1:36347, datanodeUuid=ba6926ec-cfd9-48eb-b711-8df620d49d27, infoPort=40691, infoSecurePort=0, ipcPort=32831, storageInfo=lv=-57;cid=testClusterID;nsid=1371502633;c=1733295029427), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:50:31,436 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd 2024-12-04T06:50:31,524 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/cluster_3cc6c746-eb6c-4595-67e3-3ec4389ad478/zookeeper_0, clientPort=56093, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/cluster_3cc6c746-eb6c-4595-67e3-3ec4389ad478/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/cluster_3cc6c746-eb6c-4595-67e3-3ec4389ad478/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-04T06:50:31,533 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56093 2024-12-04T06:50:31,543 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:50:31,546 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:50:31,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741825_1001 (size=7) 2024-12-04T06:50:31,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741825_1001 (size=7) 2024-12-04T06:50:32,205 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95 with version=8 2024-12-04T06:50:32,205 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/hbase-staging 2024-12-04T06:50:32,296 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-04T06:50:32,543 INFO [Time-limited test {}] client.ConnectionUtils(128): master/607fd5c6574c:0 server-side Connection retries=45 2024-12-04T06:50:32,554 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T06:50:32,554 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T06:50:32,559 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T06:50:32,559 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T06:50:32,559 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T06:50:32,706 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-04T06:50:32,766 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-04T06:50:32,775 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-04T06:50:32,779 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T06:50:32,806 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 2496 (auto-detected) 2024-12-04T06:50:32,807 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-04T06:50:32,826 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40555 2024-12-04T06:50:32,847 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40555 connecting to ZooKeeper ensemble=127.0.0.1:56093 2024-12-04T06:50:32,882 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:405550x0, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T06:50:32,885 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40555-0x1017c3d2f610000 connected 2024-12-04T06:50:32,913 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:50:32,915 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:50:32,924 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40555-0x1017c3d2f610000, quorum=127.0.0.1:56093, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T06:50:32,929 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95, hbase.cluster.distributed=false 2024-12-04T06:50:32,954 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40555-0x1017c3d2f610000, quorum=127.0.0.1:56093, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T06:50:32,959 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40555 
2024-12-04T06:50:32,959 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40555 2024-12-04T06:50:32,960 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40555 2024-12-04T06:50:32,960 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40555 2024-12-04T06:50:32,961 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40555 2024-12-04T06:50:33,070 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/607fd5c6574c:0 server-side Connection retries=45 2024-12-04T06:50:33,072 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T06:50:33,072 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T06:50:33,072 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T06:50:33,073 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T06:50:33,073 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T06:50:33,076 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-04T06:50:33,078 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T06:50:33,079 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41055 2024-12-04T06:50:33,080 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41055 connecting to ZooKeeper ensemble=127.0.0.1:56093 2024-12-04T06:50:33,081 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:50:33,085 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:50:33,091 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:410550x0, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T06:50:33,092 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41055-0x1017c3d2f610001 connected 2024-12-04T06:50:33,093 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41055-0x1017c3d2f610001, quorum=127.0.0.1:56093, baseZNode=/hbase Set watcher on 
znode that does not yet exist, /hbase/running 2024-12-04T06:50:33,097 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-04T06:50:33,104 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-04T06:50:33,107 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41055-0x1017c3d2f610001, quorum=127.0.0.1:56093, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-04T06:50:33,112 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41055-0x1017c3d2f610001, quorum=127.0.0.1:56093, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T06:50:33,112 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41055 2024-12-04T06:50:33,113 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41055 2024-12-04T06:50:33,113 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41055 2024-12-04T06:50:33,114 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41055 2024-12-04T06:50:33,114 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41055 2024-12-04T06:50:33,129 DEBUG [M:0;607fd5c6574c:40555 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;607fd5c6574c:40555 2024-12-04T06:50:33,130 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/607fd5c6574c,40555,1733295032348 2024-12-04T06:50:33,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40555-0x1017c3d2f610000, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T06:50:33,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41055-0x1017c3d2f610001, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T06:50:33,139 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40555-0x1017c3d2f610000, quorum=127.0.0.1:56093, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/607fd5c6574c,40555,1733295032348 2024-12-04T06:50:33,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41055-0x1017c3d2f610001, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-04T06:50:33,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40555-0x1017c3d2f610000, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:50:33,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41055-0x1017c3d2f610001, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:50:33,160 DEBUG 
[master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40555-0x1017c3d2f610000, quorum=127.0.0.1:56093, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-04T06:50:33,161 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/607fd5c6574c,40555,1733295032348 from backup master directory 2024-12-04T06:50:33,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40555-0x1017c3d2f610000, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/607fd5c6574c,40555,1733295032348 2024-12-04T06:50:33,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41055-0x1017c3d2f610001, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T06:50:33,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40555-0x1017c3d2f610000, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T06:50:33,166 WARN [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-04T06:50:33,166 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=607fd5c6574c,40555,1733295032348 2024-12-04T06:50:33,168 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-04T06:50:33,170 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-04T06:50:33,228 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/hbase.id] with ID: f339064b-3662-4b62-abeb-a0ca31c91bbb 2024-12-04T06:50:33,228 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/.tmp/hbase.id 2024-12-04T06:50:33,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741826_1002 (size=42) 2024-12-04T06:50:33,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741826_1002 (size=42) 2024-12-04T06:50:33,242 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/.tmp/hbase.id]:[hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/hbase.id] 2024-12-04T06:50:33,286 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:50:33,291 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching 
table descriptors from the filesystem. 2024-12-04T06:50:33,310 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 17ms. 2024-12-04T06:50:33,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40555-0x1017c3d2f610000, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:50:33,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41055-0x1017c3d2f610001, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:50:33,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741827_1003 (size=196) 2024-12-04T06:50:33,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741827_1003 (size=196) 2024-12-04T06:50:33,347 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T06:50:33,349 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-04T06:50:33,356 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T06:50:33,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741828_1004 (size=1189) 2024-12-04T06:50:33,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741828_1004 (size=1189) 2024-12-04T06:50:33,404 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/MasterData/data/master/store 2024-12-04T06:50:33,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741829_1005 (size=34) 2024-12-04T06:50:33,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741829_1005 (size=34) 2024-12-04T06:50:33,427 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-04T06:50:33,431 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:50:33,433 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-04T06:50:33,433 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:50:33,433 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:50:33,435 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-04T06:50:33,435 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:50:33,436 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-04T06:50:33,437 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733295033433Disabling compacts and flushes for region at 1733295033433Disabling writes for close at 1733295033435 (+2 ms)Writing region close event to WAL at 1733295033436 (+1 ms)Closed at 1733295033436 2024-12-04T06:50:33,440 WARN [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/MasterData/data/master/store/.initializing 2024-12-04T06:50:33,440 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/MasterData/WALs/607fd5c6574c,40555,1733295032348 2024-12-04T06:50:33,462 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=607fd5c6574c%2C40555%2C1733295032348, suffix=, logDir=hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/MasterData/WALs/607fd5c6574c,40555,1733295032348, archiveDir=hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/MasterData/oldWALs, maxLogs=10 2024-12-04T06:50:33,471 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C40555%2C1733295032348.1733295033467 2024-12-04T06:50:33,489 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/MasterData/WALs/607fd5c6574c,40555,1733295032348/607fd5c6574c%2C40555%2C1733295032348.1733295033467 2024-12-04T06:50:33,498 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40691:40691),(127.0.0.1/127.0.0.1:46651:46651)] 2024-12-04T06:50:33,499 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-04T06:50:33,500 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:50:33,504 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:50:33,505 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:50:33,544 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:50:33,571 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-04T06:50:33,575 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:50:33,578 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:50:33,578 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:50:33,581 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-04T06:50:33,581 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:50:33,582 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T06:50:33,583 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:50:33,585 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: 
max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-04T06:50:33,585 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:50:33,586 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T06:50:33,586 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:50:33,589 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-04T06:50:33,589 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:50:33,590 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T06:50:33,591 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:50:33,594 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:50:33,595 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:50:33,600 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:50:33,600 DEBUG 
[master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:50:33,603 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-04T06:50:33,608 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:50:33,615 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T06:50:33,617 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=824928, jitterRate=0.048950403928756714}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-04T06:50:33,623 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733295033519Initializing all the Stores at 1733295033521 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295033522 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295033522Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295033523 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295033523Cleaning up temporary data from old regions at 1733295033600 (+77 ms)Region opened successfully at 1733295033623 (+23 ms) 2024-12-04T06:50:33,625 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-04T06:50:33,666 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@219cfb30, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=607fd5c6574c/172.17.0.2:0 2024-12-04T06:50:33,702 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-04T06:50:33,714 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-04T06:50:33,714 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-04T06:50:33,718 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-04T06:50:33,719 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-04T06:50:33,725 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-12-04T06:50:33,725 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-04T06:50:33,758 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-04T06:50:33,769 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40555-0x1017c3d2f610000, quorum=127.0.0.1:56093, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-04T06:50:33,772 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-04T06:50:33,774 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-04T06:50:33,776 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40555-0x1017c3d2f610000, quorum=127.0.0.1:56093, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-04T06:50:33,778 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-04T06:50:33,780 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-04T06:50:33,783 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40555-0x1017c3d2f610000, quorum=127.0.0.1:56093, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-04T06:50:33,787 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-04T06:50:33,789 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40555-0x1017c3d2f610000, quorum=127.0.0.1:56093, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily 
an error) 2024-12-04T06:50:33,791 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-04T06:50:33,811 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40555-0x1017c3d2f610000, quorum=127.0.0.1:56093, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-04T06:50:33,812 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-04T06:50:33,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40555-0x1017c3d2f610000, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T06:50:33,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41055-0x1017c3d2f610001, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T06:50:33,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40555-0x1017c3d2f610000, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:50:33,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41055-0x1017c3d2f610001, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:50:33,820 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=607fd5c6574c,40555,1733295032348, sessionid=0x1017c3d2f610000, setting cluster-up flag (Was=false) 2024-12-04T06:50:33,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40555-0x1017c3d2f610000, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:50:33,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41055-0x1017c3d2f610001, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:50:33,845 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-04T06:50:33,847 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=607fd5c6574c,40555,1733295032348 2024-12-04T06:50:33,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40555-0x1017c3d2f610000, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:50:33,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41055-0x1017c3d2f610001, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:50:33,861 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-04T06:50:33,862 DEBUG 
[master/607fd5c6574c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=607fd5c6574c,40555,1733295032348 2024-12-04T06:50:33,868 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-04T06:50:33,918 INFO [RS:0;607fd5c6574c:41055 {}] regionserver.HRegionServer(746): ClusterId : f339064b-3662-4b62-abeb-a0ca31c91bbb 2024-12-04T06:50:33,921 DEBUG [RS:0;607fd5c6574c:41055 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-04T06:50:33,926 DEBUG [RS:0;607fd5c6574c:41055 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-04T06:50:33,926 DEBUG [RS:0;607fd5c6574c:41055 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-04T06:50:33,929 DEBUG [RS:0;607fd5c6574c:41055 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-04T06:50:33,930 DEBUG [RS:0;607fd5c6574c:41055 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1038e8c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=607fd5c6574c/172.17.0.2:0 2024-12-04T06:50:33,941 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-04T06:50:33,945 DEBUG [RS:0;607fd5c6574c:41055 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;607fd5c6574c:41055 2024-12-04T06:50:33,947 INFO [RS:0;607fd5c6574c:41055 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-04T06:50:33,948 INFO [RS:0;607fd5c6574c:41055 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-04T06:50:33,948 DEBUG [RS:0;607fd5c6574c:41055 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-04T06:50:33,950 INFO [RS:0;607fd5c6574c:41055 {}] regionserver.HRegionServer(2659): reportForDuty to master=607fd5c6574c,40555,1733295032348 with port=41055, startcode=1733295033031 2024-12-04T06:50:33,951 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-04T06:50:33,957 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
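The ZKUtil lines above repeatedly report znodes such as /hbase/balancer, /hbase/normalizer and /hbase/switch/split as absent ("not necessarily an error"). A minimal sketch of the same existence check with the plain ZooKeeper client, assuming the quorum string from the log and an arbitrary 30 s session timeout:

    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class ZkExistsSketch {
      public static void main(String[] args) throws Exception {
        // Quorum and paths come from the log above; the session timeout is an assumption.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:56093", 30_000, event -> { /* ignore watch events */ });
        try {
          for (String path : new String[] {"/hbase/balancer", "/hbase/normalizer", "/hbase/switch/split"}) {
            Stat stat = zk.exists(path, false);
            // A null Stat is what ZKUtil reports above as "node does not exist (not necessarily an error)"
            System.out.println(path + " -> " + (stat == null ? "absent" : "present"));
          }
        } finally {
          zk.close();
        }
      }
    }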
2024-12-04T06:50:33,964 DEBUG [RS:0;607fd5c6574c:41055 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-04T06:50:33,963 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 607fd5c6574c,40555,1733295032348 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-04T06:50:33,970 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/607fd5c6574c:0, corePoolSize=5, maxPoolSize=5 2024-12-04T06:50:33,970 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/607fd5c6574c:0, corePoolSize=5, maxPoolSize=5 2024-12-04T06:50:33,970 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/607fd5c6574c:0, corePoolSize=5, maxPoolSize=5 2024-12-04T06:50:33,970 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/607fd5c6574c:0, corePoolSize=5, maxPoolSize=5 2024-12-04T06:50:33,970 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/607fd5c6574c:0, corePoolSize=10, maxPoolSize=10 2024-12-04T06:50:33,970 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:50:33,970 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/607fd5c6574c:0, corePoolSize=2, maxPoolSize=2 2024-12-04T06:50:33,971 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:50:33,974 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733295063974 2024-12-04T06:50:33,975 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-04T06:50:33,975 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T06:50:33,976 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-04T06:50:33,976 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-04T06:50:33,980 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-04T06:50:33,981 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-04T06:50:33,981 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-04T06:50:33,981 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-04T06:50:33,983 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:50:33,983 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-04T06:50:33,982 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
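FSTableDescriptors has just written the hbase:meta descriptor shown above. A minimal sketch of reading that descriptor back through the Admin API, assuming a reachable cluster and default client configuration:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class DumpMetaDescriptor {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
          // Should list the same families the log shows: info, ns, rep_barrier, table
          for (ColumnFamilyDescriptor cf : meta.getColumnFamilies()) {
            System.out.println(cf.getNameAsString() + " versions=" + cf.getMaxVersions()
                + " blocksize=" + cf.getBlocksize());
          }
        }
      }
    }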
2024-12-04T06:50:33,985 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-04T06:50:33,986 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-04T06:50:33,987 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-04T06:50:33,989 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-04T06:50:33,989 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-04T06:50:33,990 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.large.0-1733295033990,5,FailOnTimeoutGroup] 2024-12-04T06:50:33,992 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.small.0-1733295033991,5,FailOnTimeoutGroup] 2024-12-04T06:50:33,992 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-04T06:50:33,993 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-04T06:50:33,994 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-04T06:50:33,994 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
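The HMaster line above notes that reopening regions with a very high storeFileRefCount stays disabled until hbase.regions.recovery.store.file.ref.count is set above 0. A minimal sketch of setting it programmatically; the threshold of 3 is only an example value:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RecoveryThresholdSketch {
      public static Configuration configure() {
        Configuration conf = HBaseConfiguration.create();
        // Property name taken from the log above; any value > 0 enables the feature.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
        return conf;
      }
    }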
2024-12-04T06:50:34,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741831_1007 (size=1321) 2024-12-04T06:50:34,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741831_1007 (size=1321) 2024-12-04T06:50:34,015 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-04T06:50:34,016 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95 2024-12-04T06:50:34,052 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48129, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-04T06:50:34,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741832_1008 (size=32) 2024-12-04T06:50:34,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741832_1008 (size=32) 2024-12-04T06:50:34,057 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:50:34,060 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40555 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 607fd5c6574c,41055,1733295033031 2024-12-04T06:50:34,062 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, 
cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T06:50:34,063 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40555 {}] master.ServerManager(517): Registering regionserver=607fd5c6574c,41055,1733295033031 2024-12-04T06:50:34,065 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T06:50:34,066 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:50:34,067 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:50:34,067 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-04T06:50:34,075 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-04T06:50:34,075 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:50:34,076 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:50:34,076 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T06:50:34,081 DEBUG [RS:0;607fd5c6574c:41055 {}] 
regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95 2024-12-04T06:50:34,081 DEBUG [RS:0;607fd5c6574c:41055 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38117 2024-12-04T06:50:34,081 DEBUG [RS:0;607fd5c6574c:41055 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-04T06:50:34,086 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T06:50:34,087 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:50:34,088 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:50:34,088 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T06:50:34,089 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40555-0x1017c3d2f610000, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T06:50:34,090 DEBUG [RS:0;607fd5c6574c:41055 {}] zookeeper.ZKUtil(111): regionserver:41055-0x1017c3d2f610001, quorum=127.0.0.1:56093, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/607fd5c6574c,41055,1733295033031 2024-12-04T06:50:34,090 WARN [RS:0;607fd5c6574c:41055 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
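The cacheConfig lines above (cacheDataOnRead=true, cacheDataOnWrite=false, prefetchOnOpen=false, ...) reflect per-family caching behavior. A minimal sketch of the corresponding column-family builder calls, assuming the standard 2.x setters; the family name 'info' is only an example and the mapping to every logged flag is approximate:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CacheConfigSketch {
      public static ColumnFamilyDescriptor infoFamily() {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setBlockCacheEnabled(true)     // BLOCKCACHE => 'true' in the dumped descriptors
            .setCacheDataOnWrite(false)     // cacheDataOnWrite=false in the cacheConfig lines
            .setCacheIndexesOnWrite(false)
            .setCacheBloomsOnWrite(false)
            .setEvictBlocksOnClose(false)   // cacheEvictOnClose=false
            .setPrefetchBlocksOnOpen(false) // prefetchOnOpen=false
            .build();
      }
    }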
2024-12-04T06:50:34,090 INFO [RS:0;607fd5c6574c:41055 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T06:50:34,090 DEBUG [RS:0;607fd5c6574c:41055 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031 2024-12-04T06:50:34,091 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-04T06:50:34,091 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:50:34,092 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:50:34,092 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-04T06:50:34,092 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [607fd5c6574c,41055,1733295033031] 2024-12-04T06:50:34,093 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/hbase/meta/1588230740 2024-12-04T06:50:34,094 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/hbase/meta/1588230740 2024-12-04T06:50:34,098 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-04T06:50:34,098 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-04T06:50:34,099 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
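The FlushLargeStoresPolicy line above falls back to memStoreFlushSize divided by the number of families because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor. A minimal sketch of setting that bound per table; the table name, family name and 16 MB value are hypothetical:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class PerFamilyFlushBoundSketch {
      public static TableDescriptor build() {
        // Property name taken from the FlushLargeStoresPolicy log line above.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table"))
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16L * 1024 * 1024))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            .build();
      }
    }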
2024-12-04T06:50:34,102 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-04T06:50:34,107 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T06:50:34,108 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=727199, jitterRate=-0.07531869411468506}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T06:50:34,111 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733295034057Initializing all the Stores at 1733295034060 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295034060Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295034062 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295034062Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295034062Cleaning up temporary data from old regions at 1733295034098 (+36 ms)Region opened successfully at 1733295034111 (+13 ms) 2024-12-04T06:50:34,111 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-04T06:50:34,111 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-04T06:50:34,111 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-04T06:50:34,112 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-04T06:50:34,112 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-04T06:50:34,117 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-04T06:50:34,117 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733295034111Disabling compacts and flushes for region at 1733295034111Disabling writes for close at 1733295034112 (+1 
ms)Writing region close event to WAL at 1733295034117 (+5 ms)Closed at 1733295034117 2024-12-04T06:50:34,120 INFO [RS:0;607fd5c6574c:41055 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-04T06:50:34,121 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T06:50:34,121 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-04T06:50:34,128 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-04T06:50:34,133 INFO [RS:0;607fd5c6574c:41055 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-04T06:50:34,139 INFO [RS:0;607fd5c6574c:41055 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-04T06:50:34,139 INFO [RS:0;607fd5c6574c:41055 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T06:50:34,140 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-04T06:50:34,140 INFO [RS:0;607fd5c6574c:41055 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-04T06:50:34,145 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-04T06:50:34,147 INFO [RS:0;607fd5c6574c:41055 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-04T06:50:34,149 INFO [RS:0;607fd5c6574c:41055 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
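The MemStoreFlusher line above reports globalMemStoreLimit=880 M with a low-water mark of 836 M. Those limits derive from fractions of the region server heap; a minimal sketch, assuming the standard hbase.regionserver.global.memstore.size properties, with example values only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreSizingSketch {
      public static Configuration configure() {
        Configuration conf = HBaseConfiguration.create();
        // Fractions of the RS heap; 0.4 of a ~2.2 GB heap with a 0.95 lower limit lands in the
        // same ballpark as the 880 M / 836 M figures logged above. Values are illustrative.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        return conf;
      }
    }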
2024-12-04T06:50:34,149 DEBUG [RS:0;607fd5c6574c:41055 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:50:34,149 DEBUG [RS:0;607fd5c6574c:41055 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:50:34,149 DEBUG [RS:0;607fd5c6574c:41055 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:50:34,149 DEBUG [RS:0;607fd5c6574c:41055 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:50:34,150 DEBUG [RS:0;607fd5c6574c:41055 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:50:34,150 DEBUG [RS:0;607fd5c6574c:41055 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/607fd5c6574c:0, corePoolSize=2, maxPoolSize=2 2024-12-04T06:50:34,150 DEBUG [RS:0;607fd5c6574c:41055 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:50:34,150 DEBUG [RS:0;607fd5c6574c:41055 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:50:34,150 DEBUG [RS:0;607fd5c6574c:41055 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:50:34,150 DEBUG [RS:0;607fd5c6574c:41055 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:50:34,151 DEBUG [RS:0;607fd5c6574c:41055 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:50:34,151 DEBUG [RS:0;607fd5c6574c:41055 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:50:34,151 DEBUG [RS:0;607fd5c6574c:41055 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/607fd5c6574c:0, corePoolSize=3, maxPoolSize=3 2024-12-04T06:50:34,151 DEBUG [RS:0;607fd5c6574c:41055 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0, corePoolSize=3, maxPoolSize=3 2024-12-04T06:50:34,152 INFO [RS:0;607fd5c6574c:41055 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T06:50:34,152 INFO [RS:0;607fd5c6574c:41055 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T06:50:34,153 INFO [RS:0;607fd5c6574c:41055 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T06:50:34,153 INFO [RS:0;607fd5c6574c:41055 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
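The ChoreService entries above are periodic tasks with the listed periods. As a rough analogy only (plain Java scheduling, not HBase's ChoreService), the pattern looks like:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreAnalogy {
      public static void main(String[] args) {
        // Periods taken from the log: CompactionChecker 1000 ms, MemstoreFlusherChore 1000 ms,
        // ExecutorStatusChore 60000 ms. The task bodies here are placeholders.
        ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);
        pool.scheduleAtFixedRate(() -> System.out.println("compaction check"), 0, 1_000, TimeUnit.MILLISECONDS);
        pool.scheduleAtFixedRate(() -> System.out.println("memstore flush check"), 0, 1_000, TimeUnit.MILLISECONDS);
        pool.scheduleAtFixedRate(() -> System.out.println("executor status"), 0, 60_000, TimeUnit.MILLISECONDS);
      }
    }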
2024-12-04T06:50:34,153 INFO [RS:0;607fd5c6574c:41055 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-04T06:50:34,153 INFO [RS:0;607fd5c6574c:41055 {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,41055,1733295033031-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T06:50:34,177 INFO [RS:0;607fd5c6574c:41055 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-04T06:50:34,180 INFO [RS:0;607fd5c6574c:41055 {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,41055,1733295033031-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T06:50:34,180 INFO [RS:0;607fd5c6574c:41055 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T06:50:34,180 INFO [RS:0;607fd5c6574c:41055 {}] regionserver.Replication(171): 607fd5c6574c,41055,1733295033031 started 2024-12-04T06:50:34,208 INFO [RS:0;607fd5c6574c:41055 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T06:50:34,208 INFO [RS:0;607fd5c6574c:41055 {}] regionserver.HRegionServer(1482): Serving as 607fd5c6574c,41055,1733295033031, RpcServer on 607fd5c6574c/172.17.0.2:41055, sessionid=0x1017c3d2f610001 2024-12-04T06:50:34,209 DEBUG [RS:0;607fd5c6574c:41055 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-04T06:50:34,209 DEBUG [RS:0;607fd5c6574c:41055 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 607fd5c6574c,41055,1733295033031 2024-12-04T06:50:34,210 DEBUG [RS:0;607fd5c6574c:41055 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '607fd5c6574c,41055,1733295033031' 2024-12-04T06:50:34,210 DEBUG [RS:0;607fd5c6574c:41055 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-04T06:50:34,211 DEBUG [RS:0;607fd5c6574c:41055 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-04T06:50:34,212 DEBUG [RS:0;607fd5c6574c:41055 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-04T06:50:34,212 DEBUG [RS:0;607fd5c6574c:41055 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-04T06:50:34,212 DEBUG [RS:0;607fd5c6574c:41055 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 607fd5c6574c,41055,1733295033031 2024-12-04T06:50:34,212 DEBUG [RS:0;607fd5c6574c:41055 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '607fd5c6574c,41055,1733295033031' 2024-12-04T06:50:34,212 DEBUG [RS:0;607fd5c6574c:41055 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-04T06:50:34,213 DEBUG [RS:0;607fd5c6574c:41055 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-04T06:50:34,214 DEBUG [RS:0;607fd5c6574c:41055 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-04T06:50:34,214 INFO [RS:0;607fd5c6574c:41055 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-04T06:50:34,214 INFO [RS:0;607fd5c6574c:41055 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-12-04T06:50:34,296 WARN [607fd5c6574c:40555 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-04T06:50:34,323 INFO [RS:0;607fd5c6574c:41055 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=607fd5c6574c%2C41055%2C1733295033031, suffix=, logDir=hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031, archiveDir=hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/oldWALs, maxLogs=32 2024-12-04T06:50:34,326 INFO [RS:0;607fd5c6574c:41055 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C41055%2C1733295033031.1733295034326 2024-12-04T06:50:34,336 INFO [RS:0;607fd5c6574c:41055 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295034326 2024-12-04T06:50:34,341 DEBUG [RS:0;607fd5c6574c:41055 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46651:46651),(127.0.0.1/127.0.0.1:40691:40691)] 2024-12-04T06:50:34,548 DEBUG [607fd5c6574c:40555 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-04T06:50:34,562 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=607fd5c6574c,41055,1733295033031 2024-12-04T06:50:34,570 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 607fd5c6574c,41055,1733295033031, state=OPENING 2024-12-04T06:50:34,576 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-04T06:50:34,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40555-0x1017c3d2f610000, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:50:34,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41055-0x1017c3d2f610001, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:50:34,580 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T06:50:34,581 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T06:50:34,582 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-04T06:50:34,585 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=607fd5c6574c,41055,1733295033031}] 2024-12-04T06:50:34,760 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-04T06:50:34,763 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35439, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-04T06:50:34,774 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-04T06:50:34,775 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T06:50:34,779 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=607fd5c6574c%2C41055%2C1733295033031.meta, suffix=.meta, logDir=hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031, archiveDir=hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/oldWALs, maxLogs=32 2024-12-04T06:50:34,781 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C41055%2C1733295033031.meta.1733295034781.meta 2024-12-04T06:50:34,794 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.meta.1733295034781.meta 2024-12-04T06:50:34,800 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40691:40691),(127.0.0.1/127.0.0.1:46651:46651)] 2024-12-04T06:50:34,801 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-04T06:50:34,803 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-04T06:50:34,806 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-04T06:50:34,812 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
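The meta region open above loads org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from the table descriptor (HTD). hbase:meta declares that endpoint internally; purely to illustrate the same mechanism, and not as code from this test, a user table can declare a coprocessor class on its descriptor so region servers load it when they open the table's regions. The table name "demo" below is hypothetical.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorOnTableDescriptor {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Declaring the coprocessor on the descriptor is what later shows up in the
      // region server log as "Loading coprocessor class ... from HTD ... successfully."
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("demo"))                      // hypothetical table
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
          .setCoprocessor(
              "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
          .build();
      admin.createTable(td);
    }
  }
}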
2024-12-04T06:50:34,817 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-04T06:50:34,818 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:50:34,818 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-04T06:50:34,818 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-04T06:50:34,822 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T06:50:34,824 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T06:50:34,824 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:50:34,825 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:50:34,825 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-04T06:50:34,826 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-04T06:50:34,827 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:50:34,827 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:50:34,828 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T06:50:34,829 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T06:50:34,829 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:50:34,830 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:50:34,830 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T06:50:34,832 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-04T06:50:34,832 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:50:34,833 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
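The CompactionConfiguration lines above print the effective per-store compaction settings for each column family of hbase:meta (minCompactSize 128 MB, 3 to 10 files per compaction, ratio 1.2, off-peak ratio 5.0). As a rough sketch, assuming the standard configuration keys rather than anything taken from this log, those printed values correspond to settings like the following.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Keys that, to my understanding, back the values CompactionConfiguration prints;
    // the numbers mirror the defaults seen in the log above.
    conf.setInt("hbase.hstore.compaction.min", 3);           // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);          // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2F);    // selection ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0F);
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
    System.out.println("compaction ratio = "
        + conf.getFloat("hbase.hstore.compaction.ratio", 1.2F));
  }
}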
2024-12-04T06:50:34,833 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-04T06:50:34,835 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/hbase/meta/1588230740 2024-12-04T06:50:34,838 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/hbase/meta/1588230740 2024-12-04T06:50:34,841 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-04T06:50:34,841 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-04T06:50:34,842 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-04T06:50:34,845 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-04T06:50:34,848 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=776743, jitterRate=-0.012320652604103088}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T06:50:34,848 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-04T06:50:34,850 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733295034819Writing region info on filesystem at 1733295034819Initializing all the Stores at 1733295034821 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295034821Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295034821Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295034822 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295034822Cleaning up temporary data from old regions at 1733295034841 (+19 ms)Running coprocessor post-open hooks at 1733295034848 (+7 ms)Region opened successfully at 1733295034850 (+2 ms) 2024-12-04T06:50:34,859 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733295034751 2024-12-04T06:50:34,873 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-04T06:50:34,873 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-04T06:50:34,875 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=607fd5c6574c,41055,1733295033031 2024-12-04T06:50:34,877 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 607fd5c6574c,41055,1733295033031, state=OPEN 2024-12-04T06:50:34,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40555-0x1017c3d2f610000, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T06:50:34,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41055-0x1017c3d2f610001, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T06:50:34,883 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T06:50:34,883 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T06:50:34,883 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=607fd5c6574c,41055,1733295033031 2024-12-04T06:50:34,890 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-04T06:50:34,890 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=607fd5c6574c,41055,1733295033031 in 300 msec 2024-12-04T06:50:34,900 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-04T06:50:34,901 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 765 msec 2024-12-04T06:50:34,904 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T06:50:34,904 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-04T06:50:34,931 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-04T06:50:34,933 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=607fd5c6574c,41055,1733295033031, seqNum=-1] 2024-12-04T06:50:34,960 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T06:50:34,963 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44665, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T06:50:34,985 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0860 sec 2024-12-04T06:50:34,986 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733295034986, completionTime=-1 2024-12-04T06:50:34,988 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-04T06:50:34,988 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-04T06:50:35,017 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-04T06:50:35,017 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733295095017 2024-12-04T06:50:35,018 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733295155018 2024-12-04T06:50:35,018 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 29 msec 2024-12-04T06:50:35,021 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,40555,1733295032348-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T06:50:35,021 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,40555,1733295032348-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T06:50:35,021 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,40555,1733295032348-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T06:50:35,023 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-607fd5c6574c:40555, period=300000, unit=MILLISECONDS is enabled. 
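InitMetaProcedure above creates the built-in 'default' and 'hbase' namespaces on the master side. For illustration only (the namespace name demo_ns is hypothetical and this is not part of the test), the client-side equivalent for a user namespace looks roughly like this.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CreateNamespaceSketch {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // The master creates 'default' and 'hbase' itself during InitMetaProcedure;
      // a user namespace is created the same way from the client side.
      admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println(ns.getName());
      }
    }
  }
}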
2024-12-04T06:50:35,023 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-04T06:50:35,024 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-04T06:50:35,032 DEBUG [master/607fd5c6574c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-04T06:50:35,054 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.887sec 2024-12-04T06:50:35,056 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-04T06:50:35,057 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-04T06:50:35,058 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-04T06:50:35,059 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-04T06:50:35,059 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-04T06:50:35,060 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,40555,1733295032348-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T06:50:35,060 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,40555,1733295032348-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-04T06:50:35,068 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-04T06:50:35,069 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-04T06:50:35,070 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,40555,1733295032348-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
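The "Chore ScheduledChore name=..., period=..., unit=... is enabled" entries above come from HBase's ChoreService scheduling periodic background tasks. ScheduledChore and ChoreService are internal HBase classes, so the sketch below is only a guess at how such a chore is defined and scheduled, assuming their commonly seen constructors; the chore name, period, and stopper are made up.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    Stoppable stopper = new Stoppable() {   // trivial stopper that never requests a stop
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    // A chore runs its chore() body every `period` units, like the entries above.
    ScheduledChore demo = new ScheduledChore("DemoChore", stopper, 5, 0, TimeUnit.SECONDS) {
      @Override protected void chore() {
        System.out.println("chore tick");
      }
    };
    ChoreService service = new ChoreService("demo");  // prefix for its worker threads
    service.scheduleChore(demo);
    Thread.sleep(12_000);  // let it tick a couple of times
    service.shutdown();
  }
}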
2024-12-04T06:50:35,130 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b598c24, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T06:50:35,134 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-04T06:50:35,134 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-04T06:50:35,138 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 607fd5c6574c,40555,-1 for getting cluster id 2024-12-04T06:50:35,141 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-04T06:50:35,150 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f339064b-3662-4b62-abeb-a0ca31c91bbb' 2024-12-04T06:50:35,153 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-04T06:50:35,154 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f339064b-3662-4b62-abeb-a0ca31c91bbb" 2024-12-04T06:50:35,154 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1042026e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T06:50:35,154 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [607fd5c6574c,40555,-1] 2024-12-04T06:50:35,157 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-04T06:50:35,159 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:50:35,161 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36380, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-04T06:50:35,164 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36e15a4d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T06:50:35,164 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-04T06:50:35,171 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=607fd5c6574c,41055,1733295033031, seqNum=-1] 2024-12-04T06:50:35,172 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T06:50:35,174 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56862, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T06:50:35,195 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=607fd5c6574c,40555,1733295032348 2024-12-04T06:50:35,195 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:50:35,203 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-04T06:50:35,207 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-04T06:50:35,211 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 607fd5c6574c,40555,1733295032348 2024-12-04T06:50:35,214 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7257518 2024-12-04T06:50:35,215 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-04T06:50:35,218 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36396, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-04T06:50:35,220 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40555 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-04T06:50:35,220 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40555 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
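The two TableDescriptorChecker warnings above fire because the table is being created with a very small MAX_FILESIZE (786432 bytes) and MEMSTORE_FLUSHSIZE (8192 bytes), which log-rolling tests typically set on purpose to force frequent flushes and rolls. The sketch below is not the test's code; under that assumption it only shows the two usual places such limits are set: the cluster configuration keys named in the warnings, and the table descriptor itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class TinySplitFlushSettings {
  public static void main(String[] args) {
    // Cluster-wide defaults use the keys named in the warnings above.
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.max.filesize", 786432L);      // ~768 KB: forces frequent splits
    conf.setLong("hbase.hregion.memstore.flush.size", 8192L); // 8 KB: forces frequent flushes

    // The same limits can instead be pinned per table on its descriptor.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setMaxFileSize(786432L)
        .setMemStoreFlushSize(8192L)
        .build();
    System.out.println(td);
  }
}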
2024-12-04T06:50:35,224 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40555 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T06:50:35,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40555 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-12-04T06:50:35,235 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-04T06:50:35,238 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40555 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-12-04T06:50:35,238 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:50:35,241 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-04T06:50:35,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40555 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-04T06:50:35,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741835_1011 (size=389) 2024-12-04T06:50:35,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741835_1011 (size=389) 2024-12-04T06:50:35,292 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 97ab18cb764195c6f193b8aac9e69a4a, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95 2024-12-04T06:50:35,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741836_1012 (size=72) 2024-12-04T06:50:35,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741836_1012 (size=72) 2024-12-04T06:50:35,304 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:50:35,304 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 97ab18cb764195c6f193b8aac9e69a4a, disabling compactions & flushes 2024-12-04T06:50:35,304 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a. 2024-12-04T06:50:35,304 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a. 2024-12-04T06:50:35,304 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a. after waiting 0 ms 2024-12-04T06:50:35,304 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a. 2024-12-04T06:50:35,305 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a. 2024-12-04T06:50:35,305 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 97ab18cb764195c6f193b8aac9e69a4a: Waiting for close lock at 1733295035304Disabling compacts and flushes for region at 1733295035304Disabling writes for close at 1733295035304Writing region close event to WAL at 1733295035304Closed at 1733295035304 2024-12-04T06:50:35,307 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-04T06:50:35,314 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1733295035307"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733295035307"}]},"ts":"1733295035307"} 2024-12-04T06:50:35,320 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
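The CreateTableProcedure states above (PRE_OPERATION, WRITE_FS_LAYOUT, ADD_TO_META, ...) are driven by a single client request. A minimal client-side sketch that would produce an equivalent request for a one-family table, not the test's actual code, looks like this.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // A single 'info' family with one version, like the descriptor in the log;
      // the synchronous createTable call returns once the master's procedure finishes.
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)
              .build())
          .build());
    }
  }
}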
2024-12-04T06:50:35,323 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-04T06:50:35,326 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733295035323"}]},"ts":"1733295035323"} 2024-12-04T06:50:35,332 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-12-04T06:50:35,334 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=97ab18cb764195c6f193b8aac9e69a4a, ASSIGN}] 2024-12-04T06:50:35,337 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=97ab18cb764195c6f193b8aac9e69a4a, ASSIGN 2024-12-04T06:50:35,339 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=97ab18cb764195c6f193b8aac9e69a4a, ASSIGN; state=OFFLINE, location=607fd5c6574c,41055,1733295033031; forceNewPlan=false, retain=false 2024-12-04T06:50:35,491 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=97ab18cb764195c6f193b8aac9e69a4a, regionState=OPENING, regionLocation=607fd5c6574c,41055,1733295033031 2024-12-04T06:50:35,500 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=97ab18cb764195c6f193b8aac9e69a4a, ASSIGN because future has completed 2024-12-04T06:50:35,501 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 97ab18cb764195c6f193b8aac9e69a4a, server=607fd5c6574c,41055,1733295033031}] 2024-12-04T06:50:35,664 INFO [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a. 
2024-12-04T06:50:35,664 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 97ab18cb764195c6f193b8aac9e69a4a, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a.', STARTKEY => '', ENDKEY => ''} 2024-12-04T06:50:35,665 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 97ab18cb764195c6f193b8aac9e69a4a 2024-12-04T06:50:35,665 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:50:35,665 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 97ab18cb764195c6f193b8aac9e69a4a 2024-12-04T06:50:35,665 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 97ab18cb764195c6f193b8aac9e69a4a 2024-12-04T06:50:35,668 INFO [StoreOpener-97ab18cb764195c6f193b8aac9e69a4a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 97ab18cb764195c6f193b8aac9e69a4a 2024-12-04T06:50:35,670 INFO [StoreOpener-97ab18cb764195c6f193b8aac9e69a4a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 97ab18cb764195c6f193b8aac9e69a4a columnFamilyName info 2024-12-04T06:50:35,671 DEBUG [StoreOpener-97ab18cb764195c6f193b8aac9e69a4a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:50:35,672 INFO [StoreOpener-97ab18cb764195c6f193b8aac9e69a4a-1 {}] regionserver.HStore(327): Store=97ab18cb764195c6f193b8aac9e69a4a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T06:50:35,672 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 97ab18cb764195c6f193b8aac9e69a4a 2024-12-04T06:50:35,673 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a 2024-12-04T06:50:35,674 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a 2024-12-04T06:50:35,674 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 97ab18cb764195c6f193b8aac9e69a4a 2024-12-04T06:50:35,675 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 97ab18cb764195c6f193b8aac9e69a4a 2024-12-04T06:50:35,677 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 97ab18cb764195c6f193b8aac9e69a4a 2024-12-04T06:50:35,681 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T06:50:35,682 INFO [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 97ab18cb764195c6f193b8aac9e69a4a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=870149, jitterRate=0.10645291209220886}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-04T06:50:35,682 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 97ab18cb764195c6f193b8aac9e69a4a 2024-12-04T06:50:35,683 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 97ab18cb764195c6f193b8aac9e69a4a: Running coprocessor pre-open hook at 1733295035665Writing region info on filesystem at 1733295035665Initializing all the Stores at 1733295035667 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295035667Cleaning up temporary data from old regions at 1733295035675 (+8 ms)Running coprocessor post-open hooks at 1733295035682 (+7 ms)Region opened successfully at 1733295035683 (+1 ms) 2024-12-04T06:50:35,685 INFO [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a., pid=6, masterSystemTime=1733295035657 2024-12-04T06:50:35,689 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a. 2024-12-04T06:50:35,689 INFO [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a. 2024-12-04T06:50:35,690 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=97ab18cb764195c6f193b8aac9e69a4a, regionState=OPEN, openSeqNum=2, regionLocation=607fd5c6574c,41055,1733295033031 2024-12-04T06:50:35,694 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 97ab18cb764195c6f193b8aac9e69a4a, server=607fd5c6574c,41055,1733295033031 because future has completed 2024-12-04T06:50:35,704 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-04T06:50:35,704 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 97ab18cb764195c6f193b8aac9e69a4a, server=607fd5c6574c,41055,1733295033031 in 198 msec 2024-12-04T06:50:35,709 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-04T06:50:35,709 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=97ab18cb764195c6f193b8aac9e69a4a, ASSIGN in 370 msec 2024-12-04T06:50:35,711 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-04T06:50:35,711 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733295035711"}]},"ts":"1733295035711"} 2024-12-04T06:50:35,715 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-12-04T06:50:35,716 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-04T06:50:35,720 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 489 msec 2024-12-04T06:50:40,322 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-04T06:50:40,367 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-04T06:50:40,369 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-12-04T06:50:42,763 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-04T06:50:42,764 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-04T06:50:42,767 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-04T06:50:42,767 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-04T06:50:42,768 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-04T06:50:42,768 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-04T06:50:42,769 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-04T06:50:42,769 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-04T06:50:45,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40555 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-04T06:50:45,299 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-12-04T06:50:45,302 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-12-04T06:50:45,310 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-12-04T06:50:45,310 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a. 
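After the CREATE operation completes, the client scans hbase:meta and finds one region for the table. A hedged sketch of the same lookup through the public client API, rather than the test utility used above, would be:

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ListRegionsSketch {
  public static void main(String[] args) throws IOException {
    TableName table = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(table)) {
      // A fresh, unsplit table is expected to report exactly one region,
      // matching "Found 1 regions for table ..." in the log above.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getRegionNameAsString()
            + " on " + loc.getServerName());
      }
    }
  }
}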
2024-12-04T06:50:45,311 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C41055%2C1733295033031.1733295045311 2024-12-04T06:50:45,322 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:50:45,322 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:50:45,322 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:50:45,322 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:50:45,323 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:50:45,323 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295034326 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295045311 2024-12-04T06:50:45,326 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40691:40691),(127.0.0.1/127.0.0.1:46651:46651)] 2024-12-04T06:50:45,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741833_1009 (size=451) 2024-12-04T06:50:45,327 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295034326 is not closed yet, will try archiving it next time 2024-12-04T06:50:45,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741833_1009 (size=451) 2024-12-04T06:50:45,329 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295034326 to hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/oldWALs/607fd5c6574c%2C41055%2C1733295033031.1733295034326 2024-12-04T06:50:45,341 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a., hostname=607fd5c6574c,41055,1733295033031, seqNum=2] 2024-12-04T06:50:57,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41055 {}] regionserver.HRegion(8855): Flush requested on 97ab18cb764195c6f193b8aac9e69a4a 2024-12-04T06:50:57,381 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 97ab18cb764195c6f193b8aac9e69a4a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-04T06:50:57,462 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/.tmp/info/a7f2f92a1630445fa81af114657973a6 is 1080, key is row0001/info:/1733295045344/Put/seqid=0 2024-12-04T06:50:57,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741838_1014 (size=12509) 2024-12-04T06:50:57,477 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741838_1014 (size=12509) 2024-12-04T06:50:57,478 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/.tmp/info/a7f2f92a1630445fa81af114657973a6 2024-12-04T06:50:57,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/.tmp/info/a7f2f92a1630445fa81af114657973a6 as hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/info/a7f2f92a1630445fa81af114657973a6 2024-12-04T06:50:57,544 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/info/a7f2f92a1630445fa81af114657973a6, entries=7, sequenceid=11, filesize=12.2 K 2024-12-04T06:50:57,554 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 97ab18cb764195c6f193b8aac9e69a4a in 172ms, sequenceid=11, compaction requested=false 2024-12-04T06:50:57,555 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 97ab18cb764195c6f193b8aac9e69a4a: 2024-12-04T06:51:01,432 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-04T06:51:05,389 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C41055%2C1733295033031.1733295065388 2024-12-04T06:51:05,597 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36347,DS-52869328-cce3-44d0-b0a1-5d2b389e2659,DISK], DatanodeInfoWithStorage[127.0.0.1:33339,DS-056361a8-3721-4704-ab3a-3447e456f9d5,DISK]] 2024-12-04T06:51:05,597 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:05,597 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:05,597 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:05,598 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:05,598 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:05,598 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295045311 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295065388 2024-12-04T06:51:05,599 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46651:46651),(127.0.0.1/127.0.0.1:40691:40691)] 2024-12-04T06:51:05,599 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295045311 is not closed yet, will try archiving it next time 2024-12-04T06:51:05,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741837_1013 (size=12399) 2024-12-04T06:51:05,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741837_1013 (size=12399) 2024-12-04T06:51:05,803 INFO [FSHLog-0-hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95-prefix:607fd5c6574c,41055,1733295033031 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33339,DS-056361a8-3721-4704-ab3a-3447e456f9d5,DISK], DatanodeInfoWithStorage[127.0.0.1:36347,DS-52869328-cce3-44d0-b0a1-5d2b389e2659,DISK]] 2024-12-04T06:51:08,006 INFO [FSHLog-0-hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95-prefix:607fd5c6574c,41055,1733295033031 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33339,DS-056361a8-3721-4704-ab3a-3447e456f9d5,DISK], DatanodeInfoWithStorage[127.0.0.1:36347,DS-52869328-cce3-44d0-b0a1-5d2b389e2659,DISK]] 2024-12-04T06:51:10,211 INFO [FSHLog-0-hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95-prefix:607fd5c6574c,41055,1733295033031 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33339,DS-056361a8-3721-4704-ab3a-3447e456f9d5,DISK], DatanodeInfoWithStorage[127.0.0.1:36347,DS-52869328-cce3-44d0-b0a1-5d2b389e2659,DISK]] 2024-12-04T06:51:12,415 INFO [FSHLog-0-hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95-prefix:607fd5c6574c,41055,1733295033031 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33339,DS-056361a8-3721-4704-ab3a-3447e456f9d5,DISK], DatanodeInfoWithStorage[127.0.0.1:36347,DS-52869328-cce3-44d0-b0a1-5d2b389e2659,DISK]] 2024-12-04T06:51:12,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41055 {}] regionserver.HRegion(8855): Flush requested on 97ab18cb764195c6f193b8aac9e69a4a 2024-12-04T06:51:12,416 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 97ab18cb764195c6f193b8aac9e69a4a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-04T06:51:12,618 INFO [FSHLog-0-hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95-prefix:607fd5c6574c,41055,1733295033031 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33339,DS-056361a8-3721-4704-ab3a-3447e456f9d5,DISK], DatanodeInfoWithStorage[127.0.0.1:36347,DS-52869328-cce3-44d0-b0a1-5d2b389e2659,DISK]] 2024-12-04T06:51:12,623 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/.tmp/info/7ad7a14230794c7aba9deb6de134b4dc is 1080, key is row0008/info:/1733295059379/Put/seqid=0 2024-12-04T06:51:12,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741840_1016 (size=12509) 2024-12-04T06:51:12,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741840_1016 (size=12509) 2024-12-04T06:51:12,632 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/.tmp/info/7ad7a14230794c7aba9deb6de134b4dc 2024-12-04T06:51:12,643 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/.tmp/info/7ad7a14230794c7aba9deb6de134b4dc as hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/info/7ad7a14230794c7aba9deb6de134b4dc 2024-12-04T06:51:12,653 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/info/7ad7a14230794c7aba9deb6de134b4dc, entries=7, sequenceid=21, filesize=12.2 K 2024-12-04T06:51:12,855 INFO [FSHLog-0-hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95-prefix:607fd5c6574c,41055,1733295033031 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33339,DS-056361a8-3721-4704-ab3a-3447e456f9d5,DISK], DatanodeInfoWithStorage[127.0.0.1:36347,DS-52869328-cce3-44d0-b0a1-5d2b389e2659,DISK]] 2024-12-04T06:51:12,855 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 97ab18cb764195c6f193b8aac9e69a4a in 
439ms, sequenceid=21, compaction requested=false 2024-12-04T06:51:12,855 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 97ab18cb764195c6f193b8aac9e69a4a: 2024-12-04T06:51:12,855 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-12-04T06:51:12,855 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T06:51:12,856 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/info/a7f2f92a1630445fa81af114657973a6 because midkey is the same as first or last row 2024-12-04T06:51:14,619 INFO [FSHLog-0-hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95-prefix:607fd5c6574c,41055,1733295033031 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33339,DS-056361a8-3721-4704-ab3a-3447e456f9d5,DISK], DatanodeInfoWithStorage[127.0.0.1:36347,DS-52869328-cce3-44d0-b0a1-5d2b389e2659,DISK]] 2024-12-04T06:51:15,074 INFO [master/607fd5c6574c:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-04T06:51:15,074 INFO [master/607fd5c6574c:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-04T06:51:16,823 INFO [FSHLog-0-hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95-prefix:607fd5c6574c,41055,1733295033031 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33339,DS-056361a8-3721-4704-ab3a-3447e456f9d5,DISK], DatanodeInfoWithStorage[127.0.0.1:36347,DS-52869328-cce3-44d0-b0a1-5d2b389e2659,DISK]] 2024-12-04T06:51:16,826 WARN [FSHLog-0-hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95-prefix:607fd5c6574c,41055,1733295033031 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33339,DS-056361a8-3721-4704-ab3a-3447e456f9d5,DISK], DatanodeInfoWithStorage[127.0.0.1:36347,DS-52869328-cce3-44d0-b0a1-5d2b389e2659,DISK]] 2024-12-04T06:51:16,827 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 607fd5c6574c%2C41055%2C1733295033031:(num 1733295065388) roll requested 2024-12-04T06:51:16,827 INFO [regionserver/607fd5c6574c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C41055%2C1733295033031.1733295076827 2024-12-04T06:51:17,035 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33339,DS-056361a8-3721-4704-ab3a-3447e456f9d5,DISK], DatanodeInfoWithStorage[127.0.0.1:36347,DS-52869328-cce3-44d0-b0a1-5d2b389e2659,DISK]] 2024-12-04T06:51:17,036 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:17,036 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:17,036 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:17,036 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:17,036 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
2024-12-04T06:51:17,037 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295065388 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295076827 2024-12-04T06:51:17,037 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40691:40691),(127.0.0.1/127.0.0.1:46651:46651)] 2024-12-04T06:51:17,038 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295065388 is not closed yet, will try archiving it next time 2024-12-04T06:51:17,038 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295045311 to hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/oldWALs/607fd5c6574c%2C41055%2C1733295033031.1733295045311 2024-12-04T06:51:17,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741839_1015 (size=7739) 2024-12-04T06:51:17,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741839_1015 (size=7739) 2024-12-04T06:51:19,028 INFO [FSHLog-0-hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95-prefix:607fd5c6574c,41055,1733295033031 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36347,DS-52869328-cce3-44d0-b0a1-5d2b389e2659,DISK], DatanodeInfoWithStorage[127.0.0.1:33339,DS-056361a8-3721-4704-ab3a-3447e456f9d5,DISK]] 2024-12-04T06:51:20,665 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 97ab18cb764195c6f193b8aac9e69a4a, had cached 0 bytes from a total of 25018 2024-12-04T06:51:21,232 INFO [FSHLog-0-hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95-prefix:607fd5c6574c,41055,1733295033031 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36347,DS-52869328-cce3-44d0-b0a1-5d2b389e2659,DISK], DatanodeInfoWithStorage[127.0.0.1:33339,DS-056361a8-3721-4704-ab3a-3447e456f9d5,DISK]] 2024-12-04T06:51:23,437 INFO [FSHLog-0-hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95-prefix:607fd5c6574c,41055,1733295033031 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36347,DS-52869328-cce3-44d0-b0a1-5d2b389e2659,DISK], DatanodeInfoWithStorage[127.0.0.1:33339,DS-056361a8-3721-4704-ab3a-3447e456f9d5,DISK]] 2024-12-04T06:51:25,641 INFO [FSHLog-0-hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95-prefix:607fd5c6574c,41055,1733295033031 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36347,DS-52869328-cce3-44d0-b0a1-5d2b389e2659,DISK], 
DatanodeInfoWithStorage[127.0.0.1:33339,DS-056361a8-3721-4704-ab3a-3447e456f9d5,DISK]] 2024-12-04T06:51:27,643 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-04T06:51:27,644 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C41055%2C1733295033031.1733295087644 2024-12-04T06:51:31,432 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-04T06:51:32,655 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5008 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36347,DS-52869328-cce3-44d0-b0a1-5d2b389e2659,DISK], DatanodeInfoWithStorage[127.0.0.1:33339,DS-056361a8-3721-4704-ab3a-3447e456f9d5,DISK]] 2024-12-04T06:51:32,657 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5008 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36347,DS-52869328-cce3-44d0-b0a1-5d2b389e2659,DISK], DatanodeInfoWithStorage[127.0.0.1:33339,DS-056361a8-3721-4704-ab3a-3447e456f9d5,DISK]] 2024-12-04T06:51:32,658 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 607fd5c6574c%2C41055%2C1733295033031:(num 1733295087644) roll requested 2024-12-04T06:51:32,658 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:32,658 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:32,658 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:32,658 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:32,658 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:32,659 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295076827 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295087644 2024-12-04T06:51:32,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741841_1017 (size=4753) 2024-12-04T06:51:32,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741841_1017 (size=4753) 2024-12-04T06:51:32,668 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40691:40691),(127.0.0.1/127.0.0.1:46651:46651)] 2024-12-04T06:51:32,669 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295076827 is not closed yet, will try archiving it next time 2024-12-04T06:51:32,669 INFO [regionserver/607fd5c6574c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C41055%2C1733295033031.1733295092669 2024-12-04T06:51:37,672 INFO [FSHLog-0-hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95-prefix:607fd5c6574c,41055,1733295033031 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:36347,DS-52869328-cce3-44d0-b0a1-5d2b389e2659,DISK], DatanodeInfoWithStorage[127.0.0.1:33339,DS-056361a8-3721-4704-ab3a-3447e456f9d5,DISK]] 2024-12-04T06:51:37,672 WARN [FSHLog-0-hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95-prefix:607fd5c6574c,41055,1733295033031 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36347,DS-52869328-cce3-44d0-b0a1-5d2b389e2659,DISK], DatanodeInfoWithStorage[127.0.0.1:33339,DS-056361a8-3721-4704-ab3a-3447e456f9d5,DISK]] 2024-12-04T06:51:37,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41055 {}] regionserver.HRegion(8855): Flush requested on 97ab18cb764195c6f193b8aac9e69a4a 2024-12-04T06:51:37,673 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 97ab18cb764195c6f193b8aac9e69a4a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-04T06:51:37,677 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36347,DS-52869328-cce3-44d0-b0a1-5d2b389e2659,DISK], DatanodeInfoWithStorage[127.0.0.1:33339,DS-056361a8-3721-4704-ab3a-3447e456f9d5,DISK]] 2024-12-04T06:51:37,677 WARN [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36347,DS-52869328-cce3-44d0-b0a1-5d2b389e2659,DISK], DatanodeInfoWithStorage[127.0.0.1:33339,DS-056361a8-3721-4704-ab3a-3447e456f9d5,DISK]] 2024-12-04T06:51:39,674 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-04T06:51:42,675 INFO [FSHLog-0-hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95-prefix:607fd5c6574c,41055,1733295033031 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36347,DS-52869328-cce3-44d0-b0a1-5d2b389e2659,DISK], DatanodeInfoWithStorage[127.0.0.1:33339,DS-056361a8-3721-4704-ab3a-3447e456f9d5,DISK]] 2024-12-04T06:51:42,675 WARN [FSHLog-0-hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95-prefix:607fd5c6574c,41055,1733295033031 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36347,DS-52869328-cce3-44d0-b0a1-5d2b389e2659,DISK], DatanodeInfoWithStorage[127.0.0.1:33339,DS-056361a8-3721-4704-ab3a-3447e456f9d5,DISK]] 2024-12-04T06:51:42,676 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:42,676 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:42,676 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:42,676 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:42,676 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:42,677 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295087644 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295092669 2024-12-04T06:51:42,677 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40691:40691),(127.0.0.1/127.0.0.1:46651:46651)] 2024-12-04T06:51:42,678 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295087644 is not closed yet, will try archiving it next time 2024-12-04T06:51:42,678 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 607fd5c6574c%2C41055%2C1733295033031:(num 1733295092669) roll requested 2024-12-04T06:51:42,678 INFO [regionserver/607fd5c6574c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C41055%2C1733295033031.1733295102678 2024-12-04T06:51:42,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741842_1018 (size=1569) 2024-12-04T06:51:42,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741842_1018 (size=1569) 2024-12-04T06:51:42,681 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/.tmp/info/95fe7112d02343caaaf8a7e2b15e4bf2 is 1080, key is row0015/info:/1733295074418/Put/seqid=0 2024-12-04T06:51:42,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741844_1020 (size=12509) 2024-12-04T06:51:42,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741844_1020 (size=12509) 2024-12-04T06:51:42,689 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/.tmp/info/95fe7112d02343caaaf8a7e2b15e4bf2 2024-12-04T06:51:42,698 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/.tmp/info/95fe7112d02343caaaf8a7e2b15e4bf2 as hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/info/95fe7112d02343caaaf8a7e2b15e4bf2 2024-12-04T06:51:42,706 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/info/95fe7112d02343caaaf8a7e2b15e4bf2, entries=7, sequenceid=31, filesize=12.2 K 2024-12-04T06:51:47,687 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:36347,DS-52869328-cce3-44d0-b0a1-5d2b389e2659,DISK], DatanodeInfoWithStorage[127.0.0.1:33339,DS-056361a8-3721-4704-ab3a-3447e456f9d5,DISK]] 2024-12-04T06:51:47,688 WARN [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36347,DS-52869328-cce3-44d0-b0a1-5d2b389e2659,DISK], DatanodeInfoWithStorage[127.0.0.1:33339,DS-056361a8-3721-4704-ab3a-3447e456f9d5,DISK]] 2024-12-04T06:51:47,708 INFO [FSHLog-0-hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95-prefix:607fd5c6574c,41055,1733295033031 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36347,DS-52869328-cce3-44d0-b0a1-5d2b389e2659,DISK], DatanodeInfoWithStorage[127.0.0.1:33339,DS-056361a8-3721-4704-ab3a-3447e456f9d5,DISK]] 2024-12-04T06:51:47,708 WARN [FSHLog-0-hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95-prefix:607fd5c6574c,41055,1733295033031 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36347,DS-52869328-cce3-44d0-b0a1-5d2b389e2659,DISK], DatanodeInfoWithStorage[127.0.0.1:33339,DS-056361a8-3721-4704-ab3a-3447e456f9d5,DISK]] 2024-12-04T06:51:47,708 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 97ab18cb764195c6f193b8aac9e69a4a in 10035ms, sequenceid=31, compaction requested=true 2024-12-04T06:51:47,708 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:47,708 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:47,708 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 97ab18cb764195c6f193b8aac9e69a4a: 2024-12-04T06:51:47,708 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:47,708 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:47,708 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-12-04T06:51:47,708 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T06:51:47,708 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:47,709 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/info/a7f2f92a1630445fa81af114657973a6 because midkey is the same as first or last row 2024-12-04T06:51:47,709 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295092669 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295102678 2024-12-04T06:51:47,710 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:46651:46651),(127.0.0.1/127.0.0.1:40691:40691)] 2024-12-04T06:51:47,710 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295092669 is not closed yet, will try archiving it next time 2024-12-04T06:51:47,710 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295065388 to hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/oldWALs/607fd5c6574c%2C41055%2C1733295033031.1733295065388 2024-12-04T06:51:47,710 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 607fd5c6574c%2C41055%2C1733295033031:(num 1733295107710) roll requested 2024-12-04T06:51:47,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 97ab18cb764195c6f193b8aac9e69a4a:info, priority=-2147483648, current under compaction store size is 1 2024-12-04T06:51:47,710 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C41055%2C1733295033031.1733295107710 2024-12-04T06:51:47,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741843_1019 (size=438) 2024-12-04T06:51:47,712 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295076827 to hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/oldWALs/607fd5c6574c%2C41055%2C1733295033031.1733295076827 2024-12-04T06:51:47,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741843_1019 (size=438) 2024-12-04T06:51:47,713 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T06:51:47,714 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295087644 to hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/oldWALs/607fd5c6574c%2C41055%2C1733295033031.1733295087644 2024-12-04T06:51:47,715 DEBUG [RS:0;607fd5c6574c:41055-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T06:51:47,717 DEBUG [RS:0;607fd5c6574c:41055-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T06:51:47,718 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:47,719 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:47,719 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:47,719 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:47,719 DEBUG 
[RS:0;607fd5c6574c:41055-shortCompactions-0 {}] regionserver.HStore(1541): 97ab18cb764195c6f193b8aac9e69a4a/info is initiating minor compaction (all files) 2024-12-04T06:51:47,719 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:47,719 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295102678 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295107710 2024-12-04T06:51:47,719 INFO [RS:0;607fd5c6574c:41055-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 97ab18cb764195c6f193b8aac9e69a4a/info in TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a. 2024-12-04T06:51:47,720 INFO [RS:0;607fd5c6574c:41055-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/info/a7f2f92a1630445fa81af114657973a6, hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/info/7ad7a14230794c7aba9deb6de134b4dc, hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/info/95fe7112d02343caaaf8a7e2b15e4bf2] into tmpdir=hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/.tmp, totalSize=36.6 K 2024-12-04T06:51:47,720 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40691:40691),(127.0.0.1/127.0.0.1:46651:46651)] 2024-12-04T06:51:47,720 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295092669 is not closed yet, will try archiving it next time 2024-12-04T06:51:47,720 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295102678 is not closed yet, will try archiving it next time 2024-12-04T06:51:47,721 INFO [regionserver/607fd5c6574c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C41055%2C1733295033031.1733295107720 2024-12-04T06:51:47,721 DEBUG [RS:0;607fd5c6574c:41055-shortCompactions-0 {}] compactions.Compactor(225): Compacting a7f2f92a1630445fa81af114657973a6, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733295045344 2024-12-04T06:51:47,722 DEBUG [RS:0;607fd5c6574c:41055-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7ad7a14230794c7aba9deb6de134b4dc, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1733295059379 2024-12-04T06:51:47,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741845_1021 (size=93) 
2024-12-04T06:51:47,723 DEBUG [RS:0;607fd5c6574c:41055-shortCompactions-0 {}] compactions.Compactor(225): Compacting 95fe7112d02343caaaf8a7e2b15e4bf2, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1733295074418 2024-12-04T06:51:47,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741845_1021 (size=93) 2024-12-04T06:51:47,733 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:47,733 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:47,733 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:47,733 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:47,733 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:51:47,734 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295107710 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295107720 2024-12-04T06:51:47,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741846_1022 (size=1258) 2024-12-04T06:51:47,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741846_1022 (size=1258) 2024-12-04T06:51:47,737 DEBUG [Close-WAL-Writer-2 {}] wal.AbstractFSWAL(879): hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295092669 is not closed yet, will try archiving it next time 2024-12-04T06:51:47,737 DEBUG [Close-WAL-Writer-2 {}] wal.AbstractFSWAL(879): hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295102678 is not closed yet, will try archiving it next time 2024-12-04T06:51:47,742 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40691:40691),(127.0.0.1/127.0.0.1:46651:46651)] 2024-12-04T06:51:47,742 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295092669 is not closed yet, will try archiving it next time 2024-12-04T06:51:47,742 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295102678 is not closed yet, will try archiving it next time 2024-12-04T06:51:47,757 INFO [RS:0;607fd5c6574c:41055-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 97ab18cb764195c6f193b8aac9e69a4a#info#compaction#3 average throughput is 7.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T06:51:47,758 DEBUG [RS:0;607fd5c6574c:41055-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/.tmp/info/7f6af23102ae4ac084bb649cc96c90dd is 1080, key is row0001/info:/1733295045344/Put/seqid=0 2024-12-04T06:51:47,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741848_1024 (size=27710) 2024-12-04T06:51:47,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741848_1024 (size=27710) 2024-12-04T06:51:47,778 DEBUG [RS:0;607fd5c6574c:41055-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/.tmp/info/7f6af23102ae4ac084bb649cc96c90dd as hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/info/7f6af23102ae4ac084bb649cc96c90dd 2024-12-04T06:51:47,795 INFO [RS:0;607fd5c6574c:41055-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 97ab18cb764195c6f193b8aac9e69a4a/info of 97ab18cb764195c6f193b8aac9e69a4a into 7f6af23102ae4ac084bb649cc96c90dd(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T06:51:47,795 DEBUG [RS:0;607fd5c6574c:41055-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 97ab18cb764195c6f193b8aac9e69a4a: 2024-12-04T06:51:47,798 INFO [RS:0;607fd5c6574c:41055-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a., storeName=97ab18cb764195c6f193b8aac9e69a4a/info, priority=13, startTime=1733295107710; duration=0sec 2024-12-04T06:51:47,798 DEBUG [RS:0;607fd5c6574c:41055-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-04T06:51:47,798 DEBUG [RS:0;607fd5c6574c:41055-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T06:51:47,798 DEBUG [RS:0;607fd5c6574c:41055-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/info/7f6af23102ae4ac084bb649cc96c90dd because midkey is the same as first or last row 2024-12-04T06:51:47,798 DEBUG [RS:0;607fd5c6574c:41055-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-04T06:51:47,798 DEBUG [RS:0;607fd5c6574c:41055-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T06:51:47,799 DEBUG [RS:0;607fd5c6574c:41055-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/info/7f6af23102ae4ac084bb649cc96c90dd because midkey is the same as first or last row 2024-12-04T06:51:47,799 DEBUG [RS:0;607fd5c6574c:41055-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-04T06:51:47,799 DEBUG [RS:0;607fd5c6574c:41055-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T06:51:47,799 DEBUG [RS:0;607fd5c6574c:41055-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/info/7f6af23102ae4ac084bb649cc96c90dd because midkey is the same as first or last row 2024-12-04T06:51:47,799 DEBUG [RS:0;607fd5c6574c:41055-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T06:51:47,799 DEBUG [RS:0;607fd5c6574c:41055-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 97ab18cb764195c6f193b8aac9e69a4a:info 2024-12-04T06:51:48,113 DEBUG [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(879): hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295102678 is not closed yet, will try archiving it next time 2024-12-04T06:51:48,114 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295092669 to hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/oldWALs/607fd5c6574c%2C41055%2C1733295033031.1733295092669 2024-12-04T06:51:48,124 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/WALs/607fd5c6574c,41055,1733295033031/607fd5c6574c%2C41055%2C1733295033031.1733295102678 to hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/oldWALs/607fd5c6574c%2C41055%2C1733295033031.1733295102678 2024-12-04T06:51:59,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41055 {}] regionserver.HRegion(8855): Flush requested on 97ab18cb764195c6f193b8aac9e69a4a 2024-12-04T06:51:59,749 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 97ab18cb764195c6f193b8aac9e69a4a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-04T06:51:59,757 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/.tmp/info/739014516cc84ad881159ef18ac49265 is 1080, key is row0022/info:/1733295107722/Put/seqid=0 2024-12-04T06:51:59,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741849_1025 (size=12509) 2024-12-04T06:51:59,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to 
blk_1073741849_1025 (size=12509) 2024-12-04T06:51:59,765 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/.tmp/info/739014516cc84ad881159ef18ac49265 2024-12-04T06:51:59,774 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/.tmp/info/739014516cc84ad881159ef18ac49265 as hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/info/739014516cc84ad881159ef18ac49265 2024-12-04T06:51:59,782 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/info/739014516cc84ad881159ef18ac49265, entries=7, sequenceid=42, filesize=12.2 K 2024-12-04T06:51:59,783 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 97ab18cb764195c6f193b8aac9e69a4a in 34ms, sequenceid=42, compaction requested=false 2024-12-04T06:51:59,783 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 97ab18cb764195c6f193b8aac9e69a4a: 2024-12-04T06:51:59,783 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-12-04T06:51:59,783 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T06:51:59,783 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/info/7f6af23102ae4ac084bb649cc96c90dd because midkey is the same as first or last row 2024-12-04T06:52:01,433 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-04T06:52:05,666 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 97ab18cb764195c6f193b8aac9e69a4a, had cached 0 bytes from a total of 40219 2024-12-04T06:52:07,763 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-04T06:52:07,763 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-04T06:52:07,764 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T06:52:07,769 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:52:07,769 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:52:07,769 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-04T06:52:07,769 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-04T06:52:07,770 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=131935351, stopped=false 2024-12-04T06:52:07,770 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=607fd5c6574c,40555,1733295032348 2024-12-04T06:52:07,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40555-0x1017c3d2f610000, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T06:52:07,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41055-0x1017c3d2f610001, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T06:52:07,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40555-0x1017c3d2f610000, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:07,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41055-0x1017c3d2f610001, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:07,777 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-04T06:52:07,777 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-04T06:52:07,777 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T06:52:07,778 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:52:07,778 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41055-0x1017c3d2f610001, quorum=127.0.0.1:56093, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T06:52:07,778 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40555-0x1017c3d2f610000, quorum=127.0.0.1:56093, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T06:52:07,778 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '607fd5c6574c,41055,1733295033031' ***** 2024-12-04T06:52:07,778 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-04T06:52:07,778 INFO [RS:0;607fd5c6574c:41055 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-04T06:52:07,779 INFO [RS:0;607fd5c6574c:41055 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-04T06:52:07,779 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-04T06:52:07,779 INFO [RS:0;607fd5c6574c:41055 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-04T06:52:07,779 INFO [RS:0;607fd5c6574c:41055 {}] regionserver.HRegionServer(3091): Received CLOSE for 97ab18cb764195c6f193b8aac9e69a4a 2024-12-04T06:52:07,780 INFO [RS:0;607fd5c6574c:41055 {}] regionserver.HRegionServer(959): stopping server 607fd5c6574c,41055,1733295033031 2024-12-04T06:52:07,780 INFO [RS:0;607fd5c6574c:41055 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T06:52:07,780 INFO [RS:0;607fd5c6574c:41055 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;607fd5c6574c:41055. 
2024-12-04T06:52:07,780 DEBUG [RS:0;607fd5c6574c:41055 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T06:52:07,780 DEBUG [RS:0;607fd5c6574c:41055 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:52:07,780 INFO [RS:0;607fd5c6574c:41055 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-04T06:52:07,780 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 97ab18cb764195c6f193b8aac9e69a4a, disabling compactions & flushes 2024-12-04T06:52:07,780 INFO [RS:0;607fd5c6574c:41055 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-04T06:52:07,780 INFO [RS:0;607fd5c6574c:41055 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-04T06:52:07,780 INFO [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a. 2024-12-04T06:52:07,780 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a. 2024-12-04T06:52:07,780 INFO [RS:0;607fd5c6574c:41055 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-04T06:52:07,780 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a. after waiting 0 ms 2024-12-04T06:52:07,780 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a. 
2024-12-04T06:52:07,781 INFO [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 97ab18cb764195c6f193b8aac9e69a4a 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-12-04T06:52:07,781 INFO [RS:0;607fd5c6574c:41055 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-04T06:52:07,781 DEBUG [RS:0;607fd5c6574c:41055 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 97ab18cb764195c6f193b8aac9e69a4a=TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a.} 2024-12-04T06:52:07,781 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-04T06:52:07,781 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-04T06:52:07,781 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-04T06:52:07,781 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-04T06:52:07,781 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-04T06:52:07,781 DEBUG [RS:0;607fd5c6574c:41055 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 97ab18cb764195c6f193b8aac9e69a4a 2024-12-04T06:52:07,781 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-12-04T06:52:07,787 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/.tmp/info/0b5de8fc6b28415d94328eda3744f70b is 1080, key is row0029/info:/1733295121751/Put/seqid=0 2024-12-04T06:52:07,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741850_1026 (size=8193) 2024-12-04T06:52:07,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741850_1026 (size=8193) 2024-12-04T06:52:07,795 INFO [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/.tmp/info/0b5de8fc6b28415d94328eda3744f70b 2024-12-04T06:52:07,803 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/hbase/meta/1588230740/.tmp/info/427417c9d71840cabe8352e7650f52da is 195, key is 
TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a./info:regioninfo/1733295035690/Put/seqid=0 2024-12-04T06:52:07,805 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/.tmp/info/0b5de8fc6b28415d94328eda3744f70b as hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/info/0b5de8fc6b28415d94328eda3744f70b 2024-12-04T06:52:07,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741851_1027 (size=7016) 2024-12-04T06:52:07,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741851_1027 (size=7016) 2024-12-04T06:52:07,811 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/hbase/meta/1588230740/.tmp/info/427417c9d71840cabe8352e7650f52da 2024-12-04T06:52:07,813 INFO [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/info/0b5de8fc6b28415d94328eda3744f70b, entries=3, sequenceid=48, filesize=8.0 K 2024-12-04T06:52:07,815 INFO [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 97ab18cb764195c6f193b8aac9e69a4a in 35ms, sequenceid=48, compaction requested=true 2024-12-04T06:52:07,815 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/info/a7f2f92a1630445fa81af114657973a6, hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/info/7ad7a14230794c7aba9deb6de134b4dc, hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/info/95fe7112d02343caaaf8a7e2b15e4bf2] to archive 2024-12-04T06:52:07,818 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-04T06:52:07,822 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/info/a7f2f92a1630445fa81af114657973a6 to hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/archive/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/info/a7f2f92a1630445fa81af114657973a6 2024-12-04T06:52:07,824 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/info/7ad7a14230794c7aba9deb6de134b4dc to hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/archive/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/info/7ad7a14230794c7aba9deb6de134b4dc 2024-12-04T06:52:07,825 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/info/95fe7112d02343caaaf8a7e2b15e4bf2 to hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/archive/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/info/95fe7112d02343caaaf8a7e2b15e4bf2 2024-12-04T06:52:07,836 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/hbase/meta/1588230740/.tmp/ns/ef1ed145f0b54d11b405182a384aac29 is 43, key is default/ns:d/1733295034967/Put/seqid=0 2024-12-04T06:52:07,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741852_1028 (size=5153) 2024-12-04T06:52:07,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741852_1028 (size=5153) 2024-12-04T06:52:07,843 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/hbase/meta/1588230740/.tmp/ns/ef1ed145f0b54d11b405182a384aac29 2024-12-04T06:52:07,838 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=607fd5c6574c:40555 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-12-04T06:52:07,844 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [a7f2f92a1630445fa81af114657973a6=12509, 7ad7a14230794c7aba9deb6de134b4dc=12509, 95fe7112d02343caaaf8a7e2b15e4bf2=12509] 2024-12-04T06:52:07,850 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/default/TestLogRolling-testSlowSyncLogRolling/97ab18cb764195c6f193b8aac9e69a4a/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-12-04T06:52:07,853 INFO [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a. 2024-12-04T06:52:07,853 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 97ab18cb764195c6f193b8aac9e69a4a: Waiting for close lock at 1733295127780Running coprocessor pre-close hooks at 1733295127780Disabling compacts and flushes for region at 1733295127780Disabling writes for close at 1733295127780Obtaining lock to block concurrent updates at 1733295127781 (+1 ms)Preparing flush snapshotting stores in 97ab18cb764195c6f193b8aac9e69a4a at 1733295127781Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1733295127781Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a. at 1733295127782 (+1 ms)Flushing 97ab18cb764195c6f193b8aac9e69a4a/info: creating writer at 1733295127782Flushing 97ab18cb764195c6f193b8aac9e69a4a/info: appending metadata at 1733295127787 (+5 ms)Flushing 97ab18cb764195c6f193b8aac9e69a4a/info: closing flushed file at 1733295127787Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@353a6b99: reopening flushed file at 1733295127804 (+17 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 97ab18cb764195c6f193b8aac9e69a4a in 35ms, sequenceid=48, compaction requested=true at 1733295127815 (+11 ms)Writing region close event to WAL at 1733295127845 (+30 ms)Running coprocessor post-close hooks at 1733295127851 (+6 ms)Closed at 1733295127853 (+2 ms) 2024-12-04T06:52:07,853 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1733295035219.97ab18cb764195c6f193b8aac9e69a4a. 
2024-12-04T06:52:07,868 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/hbase/meta/1588230740/.tmp/table/bb6cd1d837134d2b9885f784f36cfa75 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1733295035711/Put/seqid=0 2024-12-04T06:52:07,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741853_1029 (size=5396) 2024-12-04T06:52:07,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741853_1029 (size=5396) 2024-12-04T06:52:07,876 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/hbase/meta/1588230740/.tmp/table/bb6cd1d837134d2b9885f784f36cfa75 2024-12-04T06:52:07,884 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/hbase/meta/1588230740/.tmp/info/427417c9d71840cabe8352e7650f52da as hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/hbase/meta/1588230740/info/427417c9d71840cabe8352e7650f52da 2024-12-04T06:52:07,893 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/hbase/meta/1588230740/info/427417c9d71840cabe8352e7650f52da, entries=10, sequenceid=11, filesize=6.9 K 2024-12-04T06:52:07,894 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/hbase/meta/1588230740/.tmp/ns/ef1ed145f0b54d11b405182a384aac29 as hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/hbase/meta/1588230740/ns/ef1ed145f0b54d11b405182a384aac29 2024-12-04T06:52:07,901 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/hbase/meta/1588230740/ns/ef1ed145f0b54d11b405182a384aac29, entries=2, sequenceid=11, filesize=5.0 K 2024-12-04T06:52:07,903 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/hbase/meta/1588230740/.tmp/table/bb6cd1d837134d2b9885f784f36cfa75 as hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/hbase/meta/1588230740/table/bb6cd1d837134d2b9885f784f36cfa75 2024-12-04T06:52:07,910 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/hbase/meta/1588230740/table/bb6cd1d837134d2b9885f784f36cfa75, entries=2, sequenceid=11, filesize=5.3 K 2024-12-04T06:52:07,911 INFO 
[RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 130ms, sequenceid=11, compaction requested=false 2024-12-04T06:52:07,917 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-04T06:52:07,918 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-04T06:52:07,918 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-04T06:52:07,919 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733295127781Running coprocessor pre-close hooks at 1733295127781Disabling compacts and flushes for region at 1733295127781Disabling writes for close at 1733295127781Obtaining lock to block concurrent updates at 1733295127781Preparing flush snapshotting stores in 1588230740 at 1733295127781Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1733295127782 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733295127782Flushing 1588230740/info: creating writer at 1733295127783 (+1 ms)Flushing 1588230740/info: appending metadata at 1733295127803 (+20 ms)Flushing 1588230740/info: closing flushed file at 1733295127803Flushing 1588230740/ns: creating writer at 1733295127819 (+16 ms)Flushing 1588230740/ns: appending metadata at 1733295127835 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1733295127835Flushing 1588230740/table: creating writer at 1733295127852 (+17 ms)Flushing 1588230740/table: appending metadata at 1733295127868 (+16 ms)Flushing 1588230740/table: closing flushed file at 1733295127868Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6573862a: reopening flushed file at 1733295127883 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@603c4d5f: reopening flushed file at 1733295127893 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@583df9e1: reopening flushed file at 1733295127902 (+9 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 130ms, sequenceid=11, compaction requested=false at 1733295127911 (+9 ms)Writing region close event to WAL at 1733295127913 (+2 ms)Running coprocessor post-close hooks at 1733295127918 (+5 ms)Closed at 1733295127918 2024-12-04T06:52:07,919 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-04T06:52:07,981 INFO [RS:0;607fd5c6574c:41055 {}] regionserver.HRegionServer(976): stopping server 607fd5c6574c,41055,1733295033031; all regions closed. 
2024-12-04T06:52:07,983 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:07,983 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:07,983 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:07,984 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:07,984 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:07,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741834_1010 (size=3066) 2024-12-04T06:52:07,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741834_1010 (size=3066) 2024-12-04T06:52:07,990 DEBUG [RS:0;607fd5c6574c:41055 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/oldWALs 2024-12-04T06:52:07,990 INFO [RS:0;607fd5c6574c:41055 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 607fd5c6574c%2C41055%2C1733295033031.meta:.meta(num 1733295034781) 2024-12-04T06:52:07,990 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:07,991 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:07,991 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:07,991 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:07,991 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:07,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741847_1023 (size=12695) 2024-12-04T06:52:07,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741847_1023 (size=12695) 2024-12-04T06:52:07,997 DEBUG [RS:0;607fd5c6574c:41055 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/oldWALs 2024-12-04T06:52:07,997 INFO [RS:0;607fd5c6574c:41055 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 607fd5c6574c%2C41055%2C1733295033031:(num 1733295107720) 2024-12-04T06:52:07,997 DEBUG [RS:0;607fd5c6574c:41055 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:52:07,997 INFO [RS:0;607fd5c6574c:41055 {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T06:52:07,997 INFO [RS:0;607fd5c6574c:41055 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T06:52:07,997 INFO [RS:0;607fd5c6574c:41055 {}] hbase.ChoreService(370): Chore service for: regionserver/607fd5c6574c:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-04T06:52:07,997 INFO [RS:0;607fd5c6574c:41055 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T06:52:07,998 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-04T06:52:07,998 INFO [RS:0;607fd5c6574c:41055 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41055 2024-12-04T06:52:08,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41055-0x1017c3d2f610001, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/607fd5c6574c,41055,1733295033031 2024-12-04T06:52:08,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40555-0x1017c3d2f610000, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T06:52:08,003 INFO [RS:0;607fd5c6574c:41055 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T06:52:08,004 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [607fd5c6574c,41055,1733295033031] 2024-12-04T06:52:08,007 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/607fd5c6574c,41055,1733295033031 already deleted, retry=false 2024-12-04T06:52:08,007 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 607fd5c6574c,41055,1733295033031 expired; onlineServers=0 2024-12-04T06:52:08,007 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '607fd5c6574c,40555,1733295032348' ***** 2024-12-04T06:52:08,007 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-04T06:52:08,007 INFO [M:0;607fd5c6574c:40555 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T06:52:08,007 INFO [M:0;607fd5c6574c:40555 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T06:52:08,007 DEBUG [M:0;607fd5c6574c:40555 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-04T06:52:08,008 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-04T06:52:08,008 DEBUG [M:0;607fd5c6574c:40555 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-04T06:52:08,008 DEBUG [master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.large.0-1733295033990 {}] cleaner.HFileCleaner(306): Exit Thread[master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.large.0-1733295033990,5,FailOnTimeoutGroup] 2024-12-04T06:52:08,008 DEBUG [master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.small.0-1733295033991 {}] cleaner.HFileCleaner(306): Exit Thread[master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.small.0-1733295033991,5,FailOnTimeoutGroup] 2024-12-04T06:52:08,008 INFO [M:0;607fd5c6574c:40555 {}] hbase.ChoreService(370): Chore service for: master/607fd5c6574c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-04T06:52:08,008 INFO [M:0;607fd5c6574c:40555 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T06:52:08,008 DEBUG [M:0;607fd5c6574c:40555 {}] master.HMaster(1795): Stopping service threads 2024-12-04T06:52:08,008 INFO [M:0;607fd5c6574c:40555 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-04T06:52:08,008 INFO [M:0;607fd5c6574c:40555 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-04T06:52:08,009 INFO [M:0;607fd5c6574c:40555 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-04T06:52:08,009 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-04T06:52:08,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40555-0x1017c3d2f610000, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-04T06:52:08,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40555-0x1017c3d2f610000, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:08,013 DEBUG [M:0;607fd5c6574c:40555 {}] zookeeper.ZKUtil(347): master:40555-0x1017c3d2f610000, quorum=127.0.0.1:56093, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-04T06:52:08,013 WARN [M:0;607fd5c6574c:40555 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-04T06:52:08,013 INFO [M:0;607fd5c6574c:40555 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/.lastflushedseqids 2024-12-04T06:52:08,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741854_1030 (size=130) 2024-12-04T06:52:08,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741854_1030 (size=130) 2024-12-04T06:52:08,027 INFO [M:0;607fd5c6574c:40555 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-04T06:52:08,027 INFO [M:0;607fd5c6574c:40555 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-04T06:52:08,027 DEBUG [M:0;607fd5c6574c:40555 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-04T06:52:08,027 INFO [M:0;607fd5c6574c:40555 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:52:08,027 DEBUG [M:0;607fd5c6574c:40555 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:52:08,027 DEBUG [M:0;607fd5c6574c:40555 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-04T06:52:08,027 DEBUG [M:0;607fd5c6574c:40555 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:52:08,028 INFO [M:0;607fd5c6574c:40555 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.01 KB heapSize=29.18 KB 2024-12-04T06:52:08,047 DEBUG [M:0;607fd5c6574c:40555 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7dada6e53bec465e9460ee790e30a679 is 82, key is hbase:meta,,1/info:regioninfo/1733295034874/Put/seqid=0 2024-12-04T06:52:08,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741855_1031 (size=5672) 2024-12-04T06:52:08,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741855_1031 (size=5672) 2024-12-04T06:52:08,056 INFO [M:0;607fd5c6574c:40555 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7dada6e53bec465e9460ee790e30a679 2024-12-04T06:52:08,084 DEBUG [M:0;607fd5c6574c:40555 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5b8767c9e70045fca18bb0ee60d91ddf is 765, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733295035719/Put/seqid=0 2024-12-04T06:52:08,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741856_1032 (size=6246) 2024-12-04T06:52:08,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741856_1032 (size=6246) 2024-12-04T06:52:08,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41055-0x1017c3d2f610001, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T06:52:08,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41055-0x1017c3d2f610001, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T06:52:08,106 INFO [RS:0;607fd5c6574c:41055 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-04T06:52:08,107 INFO [RS:0;607fd5c6574c:41055 {}] regionserver.HRegionServer(1031): Exiting; stopping=607fd5c6574c,41055,1733295033031; zookeeper connection closed. 
2024-12-04T06:52:08,107 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@a97105b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@a97105b 2024-12-04T06:52:08,108 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-04T06:52:08,158 INFO [regionserver/607fd5c6574c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T06:52:08,492 INFO [M:0;607fd5c6574c:40555 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.41 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5b8767c9e70045fca18bb0ee60d91ddf 2024-12-04T06:52:08,499 INFO [M:0;607fd5c6574c:40555 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5b8767c9e70045fca18bb0ee60d91ddf 2024-12-04T06:52:08,516 DEBUG [M:0;607fd5c6574c:40555 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f7241def9d924177aaa12d28e7294b41 is 69, key is 607fd5c6574c,41055,1733295033031/rs:state/1733295034065/Put/seqid=0 2024-12-04T06:52:08,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741857_1033 (size=5156) 2024-12-04T06:52:08,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741857_1033 (size=5156) 2024-12-04T06:52:08,523 INFO [M:0;607fd5c6574c:40555 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f7241def9d924177aaa12d28e7294b41 2024-12-04T06:52:08,545 DEBUG [M:0;607fd5c6574c:40555 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/22c8f63b42e2488882b6e4bbd33f294f is 52, key is load_balancer_on/state:d/1733295035199/Put/seqid=0 2024-12-04T06:52:08,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741858_1034 (size=5056) 2024-12-04T06:52:08,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741858_1034 (size=5056) 2024-12-04T06:52:08,552 INFO [M:0;607fd5c6574c:40555 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/22c8f63b42e2488882b6e4bbd33f294f 2024-12-04T06:52:08,561 DEBUG [M:0;607fd5c6574c:40555 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7dada6e53bec465e9460ee790e30a679 as 
hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7dada6e53bec465e9460ee790e30a679 2024-12-04T06:52:08,568 INFO [M:0;607fd5c6574c:40555 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7dada6e53bec465e9460ee790e30a679, entries=8, sequenceid=59, filesize=5.5 K 2024-12-04T06:52:08,569 DEBUG [M:0;607fd5c6574c:40555 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5b8767c9e70045fca18bb0ee60d91ddf as hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5b8767c9e70045fca18bb0ee60d91ddf 2024-12-04T06:52:08,575 INFO [M:0;607fd5c6574c:40555 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5b8767c9e70045fca18bb0ee60d91ddf 2024-12-04T06:52:08,576 INFO [M:0;607fd5c6574c:40555 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5b8767c9e70045fca18bb0ee60d91ddf, entries=6, sequenceid=59, filesize=6.1 K 2024-12-04T06:52:08,577 DEBUG [M:0;607fd5c6574c:40555 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f7241def9d924177aaa12d28e7294b41 as hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f7241def9d924177aaa12d28e7294b41 2024-12-04T06:52:08,583 INFO [M:0;607fd5c6574c:40555 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f7241def9d924177aaa12d28e7294b41, entries=1, sequenceid=59, filesize=5.0 K 2024-12-04T06:52:08,585 DEBUG [M:0;607fd5c6574c:40555 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/22c8f63b42e2488882b6e4bbd33f294f as hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/22c8f63b42e2488882b6e4bbd33f294f 2024-12-04T06:52:08,592 INFO [M:0;607fd5c6574c:40555 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/22c8f63b42e2488882b6e4bbd33f294f, entries=1, sequenceid=59, filesize=4.9 K 2024-12-04T06:52:08,594 INFO [M:0;607fd5c6574c:40555 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.01 KB/23564, heapSize ~29.12 KB/29816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 567ms, sequenceid=59, compaction requested=false 2024-12-04T06:52:08,596 INFO [M:0;607fd5c6574c:40555 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-04T06:52:08,596 DEBUG [M:0;607fd5c6574c:40555 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733295128027Disabling compacts and flushes for region at 1733295128027Disabling writes for close at 1733295128027Obtaining lock to block concurrent updates at 1733295128028 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733295128028Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23564, getHeapSize=29816, getOffHeapSize=0, getCellsCount=70 at 1733295128028Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733295128029 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733295128029Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733295128046 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733295128047 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733295128064 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733295128084 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733295128084Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733295128499 (+415 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733295128515 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733295128515Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733295128529 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733295128544 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733295128544Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5c90f038: reopening flushed file at 1733295128560 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f2c7a8d: reopening flushed file at 1733295128568 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5bb99946: reopening flushed file at 1733295128576 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@12245b78: reopening flushed file at 1733295128583 (+7 ms)Finished flush of dataSize ~23.01 KB/23564, heapSize ~29.12 KB/29816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 567ms, sequenceid=59, compaction requested=false at 1733295128594 (+11 ms)Writing region close event to WAL at 1733295128596 (+2 ms)Closed at 1733295128596 2024-12-04T06:52:08,597 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:08,597 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:08,597 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:08,597 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:08,597 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:08,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33339 is added to blk_1073741830_1006 (size=27961) 2024-12-04T06:52:08,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36347 is added to blk_1073741830_1006 (size=27961) 2024-12-04T06:52:08,601 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-04T06:52:08,601 INFO [M:0;607fd5c6574c:40555 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-04T06:52:08,601 INFO [M:0;607fd5c6574c:40555 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40555 2024-12-04T06:52:08,601 INFO [M:0;607fd5c6574c:40555 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T06:52:08,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40555-0x1017c3d2f610000, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T06:52:08,703 INFO [M:0;607fd5c6574c:40555 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-04T06:52:08,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40555-0x1017c3d2f610000, quorum=127.0.0.1:56093, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T06:52:08,709 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:52:08,711 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T06:52:08,711 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T06:52:08,711 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T06:52:08,711 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/hadoop.log.dir/,STOPPED} 2024-12-04T06:52:08,715 WARN [BP-14357224-172.17.0.2-1733295029427 heartbeating to localhost/127.0.0.1:38117 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T06:52:08,715 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-04T06:52:08,715 WARN [BP-14357224-172.17.0.2-1733295029427 heartbeating to localhost/127.0.0.1:38117 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-14357224-172.17.0.2-1733295029427 (Datanode Uuid 38d40b95-bb2f-494b-bfbc-f254c2127156) service to localhost/127.0.0.1:38117 2024-12-04T06:52:08,715 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T06:52:08,716 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/cluster_3cc6c746-eb6c-4595-67e3-3ec4389ad478/data/data3/current/BP-14357224-172.17.0.2-1733295029427 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:52:08,717 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/cluster_3cc6c746-eb6c-4595-67e3-3ec4389ad478/data/data4/current/BP-14357224-172.17.0.2-1733295029427 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:52:08,717 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T06:52:08,720 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:52:08,720 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T06:52:08,720 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T06:52:08,721 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T06:52:08,721 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/hadoop.log.dir/,STOPPED} 2024-12-04T06:52:08,723 WARN [BP-14357224-172.17.0.2-1733295029427 heartbeating to localhost/127.0.0.1:38117 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T06:52:08,723 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-04T06:52:08,723 WARN [BP-14357224-172.17.0.2-1733295029427 heartbeating to localhost/127.0.0.1:38117 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-14357224-172.17.0.2-1733295029427 (Datanode Uuid ba6926ec-cfd9-48eb-b711-8df620d49d27) service to localhost/127.0.0.1:38117 2024-12-04T06:52:08,723 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T06:52:08,723 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/cluster_3cc6c746-eb6c-4595-67e3-3ec4389ad478/data/data1/current/BP-14357224-172.17.0.2-1733295029427 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:52:08,723 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/cluster_3cc6c746-eb6c-4595-67e3-3ec4389ad478/data/data2/current/BP-14357224-172.17.0.2-1733295029427 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:52:08,724 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T06:52:08,733 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-04T06:52:08,734 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T06:52:08,734 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T06:52:08,734 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T06:52:08,734 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/hadoop.log.dir/,STOPPED} 2024-12-04T06:52:08,745 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-04T06:52:08,778 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-04T06:52:08,789 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=80 (was 12) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38117 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38117 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:38117 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: master/607fd5c6574c:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/607fd5c6574c:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38117 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/607fd5c6574c:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@c3327a java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:38117 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:38117 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38117 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:38117 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=404 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=161 (was 150) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=6551 (was 6988) 2024-12-04T06:52:08,796 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=81, OpenFileDescriptor=404, MaxFileDescriptor=1048576, SystemLoadAverage=161, ProcessCount=11, AvailableMemoryMB=6551 2024-12-04T06:52:08,796 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-04T06:52:08,796 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/hadoop.log.dir so I do NOT create it in target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a 2024-12-04T06:52:08,796 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d092c387-2d2a-05a1-1c2f-6245c776aecd/hadoop.tmp.dir so I do NOT create it in target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a 2024-12-04T06:52:08,797 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/cluster_d9efd569-6419-5db6-32ee-d01d56c44834, deleteOnExit=true 2024-12-04T06:52:08,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-04T06:52:08,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/test.cache.data in system properties and HBase conf 2024-12-04T06:52:08,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/hadoop.tmp.dir in system properties and HBase conf 2024-12-04T06:52:08,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/hadoop.log.dir in system properties and HBase conf 2024-12-04T06:52:08,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-04T06:52:08,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-04T06:52:08,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-04T06:52:08,798 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-04T06:52:08,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-04T06:52:08,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-04T06:52:08,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-04T06:52:08,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T06:52:08,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-04T06:52:08,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-04T06:52:08,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T06:52:08,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T06:52:08,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-04T06:52:08,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/nfs.dump.dir in system properties and HBase conf 2024-12-04T06:52:08,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/java.io.tmpdir in system properties and HBase conf 2024-12-04T06:52:08,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T06:52:08,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-04T06:52:08,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-04T06:52:08,813 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-04T06:52:08,897 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T06:52:08,903 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T06:52:08,907 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T06:52:08,907 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T06:52:08,907 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-04T06:52:08,909 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T06:52:08,912 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f681677{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/hadoop.log.dir/,AVAILABLE} 2024-12-04T06:52:08,913 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3197ca45{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T06:52:09,039 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@49a88a00{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/java.io.tmpdir/jetty-localhost-44007-hadoop-hdfs-3_4_1-tests_jar-_-any-17324530265691312499/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-04T06:52:09,039 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4e4256d4{HTTP/1.1, (http/1.1)}{localhost:44007} 2024-12-04T06:52:09,039 INFO [Time-limited test {}] server.Server(415): Started @101564ms 2024-12-04T06:52:09,056 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-04T06:52:09,147 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T06:52:09,151 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T06:52:09,152 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T06:52:09,152 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T06:52:09,152 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T06:52:09,152 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@469dec96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/hadoop.log.dir/,AVAILABLE} 2024-12-04T06:52:09,153 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@eab7acc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T06:52:09,269 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2152d149{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/java.io.tmpdir/jetty-localhost-46555-hadoop-hdfs-3_4_1-tests_jar-_-any-8625841322279523761/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:52:09,270 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1d281952{HTTP/1.1, (http/1.1)}{localhost:46555} 2024-12-04T06:52:09,270 INFO [Time-limited test {}] server.Server(415): Started @101795ms 2024-12-04T06:52:09,272 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-04T06:52:09,315 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T06:52:09,319 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T06:52:09,320 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T06:52:09,320 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T06:52:09,320 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T06:52:09,320 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@274298f3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/hadoop.log.dir/,AVAILABLE} 2024-12-04T06:52:09,321 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c1be80f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T06:52:09,396 WARN [Thread-438 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/cluster_d9efd569-6419-5db6-32ee-d01d56c44834/data/data1/current/BP-678346079-172.17.0.2-1733295128832/current, will proceed with Du for space computation calculation, 2024-12-04T06:52:09,396 WARN [Thread-439 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/cluster_d9efd569-6419-5db6-32ee-d01d56c44834/data/data2/current/BP-678346079-172.17.0.2-1733295128832/current, will proceed with Du for space computation calculation, 2024-12-04T06:52:09,424 WARN [Thread-417 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-04T06:52:09,427 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x41575c41af153aa7 with lease ID 0x486199bee376c5e7: Processing first storage report for DS-324c2bf1-ad58-4bbb-aa79-d9c6d847ca07 from datanode DatanodeRegistration(127.0.0.1:45527, datanodeUuid=ee65e552-8c8e-4b1c-a90b-f7d2727f3f21, infoPort=42673, infoSecurePort=0, ipcPort=38931, storageInfo=lv=-57;cid=testClusterID;nsid=529179691;c=1733295128832) 2024-12-04T06:52:09,427 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x41575c41af153aa7 with lease ID 0x486199bee376c5e7: from storage DS-324c2bf1-ad58-4bbb-aa79-d9c6d847ca07 node DatanodeRegistration(127.0.0.1:45527, datanodeUuid=ee65e552-8c8e-4b1c-a90b-f7d2727f3f21, infoPort=42673, infoSecurePort=0, ipcPort=38931, storageInfo=lv=-57;cid=testClusterID;nsid=529179691;c=1733295128832), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:52:09,427 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x41575c41af153aa7 with lease ID 0x486199bee376c5e7: Processing first storage report for DS-322b4fa7-8c73-432f-9b29-784a11258dc1 from datanode DatanodeRegistration(127.0.0.1:45527, datanodeUuid=ee65e552-8c8e-4b1c-a90b-f7d2727f3f21, infoPort=42673, infoSecurePort=0, ipcPort=38931, storageInfo=lv=-57;cid=testClusterID;nsid=529179691;c=1733295128832) 2024-12-04T06:52:09,427 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x41575c41af153aa7 with lease ID 0x486199bee376c5e7: from storage DS-322b4fa7-8c73-432f-9b29-784a11258dc1 node DatanodeRegistration(127.0.0.1:45527, datanodeUuid=ee65e552-8c8e-4b1c-a90b-f7d2727f3f21, infoPort=42673, infoSecurePort=0, ipcPort=38931, storageInfo=lv=-57;cid=testClusterID;nsid=529179691;c=1733295128832), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:52:09,447 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6838bf55{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/java.io.tmpdir/jetty-localhost-35285-hadoop-hdfs-3_4_1-tests_jar-_-any-11739024819069152853/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:52:09,448 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@254e0164{HTTP/1.1, (http/1.1)}{localhost:35285} 2024-12-04T06:52:09,448 INFO [Time-limited test {}] server.Server(415): Started @101973ms 2024-12-04T06:52:09,450 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
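The entries above show hbase.HBaseTestingUtil bringing up the minicluster for testLogRollOnDatanodeDeath: one master, one regionserver, two datanodes and one ZooKeeper server, with the Jetty HTTP endpoints and block reports that follow. A minimal sketch of how a test can request that same topology, assuming the StartMiniClusterOption builder methods match the field names printed in the log (this is an illustration, not the test's actual code):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Mirror the topology reported above:
        // numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option);   // starts DFS, ZooKeeper and HBase as logged above
        try {
          // test body would run against util.getConnection() here
        } finally {
          util.shutdownMiniCluster();    // tears the cluster down and removes the test dirs
        }
      }
    }
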
2024-12-04T06:52:09,573 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/cluster_d9efd569-6419-5db6-32ee-d01d56c44834/data/data3/current/BP-678346079-172.17.0.2-1733295128832/current, will proceed with Du for space computation calculation, 2024-12-04T06:52:09,574 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/cluster_d9efd569-6419-5db6-32ee-d01d56c44834/data/data4/current/BP-678346079-172.17.0.2-1733295128832/current, will proceed with Du for space computation calculation, 2024-12-04T06:52:09,593 WARN [Thread-453 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-04T06:52:09,596 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x459a51f31657dd2b with lease ID 0x486199bee376c5e8: Processing first storage report for DS-49d0442c-a49f-45a4-abab-92196f01f48d from datanode DatanodeRegistration(127.0.0.1:45511, datanodeUuid=a86fc730-5639-4f00-aae8-733dbef89a3a, infoPort=43233, infoSecurePort=0, ipcPort=46847, storageInfo=lv=-57;cid=testClusterID;nsid=529179691;c=1733295128832) 2024-12-04T06:52:09,596 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x459a51f31657dd2b with lease ID 0x486199bee376c5e8: from storage DS-49d0442c-a49f-45a4-abab-92196f01f48d node DatanodeRegistration(127.0.0.1:45511, datanodeUuid=a86fc730-5639-4f00-aae8-733dbef89a3a, infoPort=43233, infoSecurePort=0, ipcPort=46847, storageInfo=lv=-57;cid=testClusterID;nsid=529179691;c=1733295128832), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:52:09,596 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x459a51f31657dd2b with lease ID 0x486199bee376c5e8: Processing first storage report for DS-a8c8697f-4875-4271-bef1-308d16deaeb0 from datanode DatanodeRegistration(127.0.0.1:45511, datanodeUuid=a86fc730-5639-4f00-aae8-733dbef89a3a, infoPort=43233, infoSecurePort=0, ipcPort=46847, storageInfo=lv=-57;cid=testClusterID;nsid=529179691;c=1733295128832) 2024-12-04T06:52:09,596 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x459a51f31657dd2b with lease ID 0x486199bee376c5e8: from storage DS-a8c8697f-4875-4271-bef1-308d16deaeb0 node DatanodeRegistration(127.0.0.1:45511, datanodeUuid=a86fc730-5639-4f00-aae8-733dbef89a3a, infoPort=43233, infoSecurePort=0, ipcPort=46847, storageInfo=lv=-57;cid=testClusterID;nsid=529179691;c=1733295128832), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:52:09,685 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a 2024-12-04T06:52:09,688 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/cluster_d9efd569-6419-5db6-32ee-d01d56c44834/zookeeper_0, clientPort=52340, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/cluster_d9efd569-6419-5db6-32ee-d01d56c44834/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/cluster_d9efd569-6419-5db6-32ee-d01d56c44834/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-04T06:52:09,689 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52340 2024-12-04T06:52:09,690 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:52:09,692 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:52:09,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45527 is added to blk_1073741825_1001 (size=7) 2024-12-04T06:52:09,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741825_1001 (size=7) 2024-12-04T06:52:09,704 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d with version=8 2024-12-04T06:52:09,705 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/hbase-staging 2024-12-04T06:52:09,707 INFO [Time-limited test {}] client.ConnectionUtils(128): master/607fd5c6574c:0 server-side Connection retries=45 2024-12-04T06:52:09,707 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T06:52:09,707 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T06:52:09,707 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T06:52:09,707 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T06:52:09,707 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T06:52:09,707 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-04T06:52:09,708 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T06:52:09,708 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43845 2024-12-04T06:52:09,710 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43845 connecting to ZooKeeper ensemble=127.0.0.1:52340 2024-12-04T06:52:09,719 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:438450x0, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T06:52:09,719 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43845-0x1017c3eaee70000 connected 2024-12-04T06:52:09,742 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:52:09,744 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:52:09,748 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43845-0x1017c3eaee70000, quorum=127.0.0.1:52340, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T06:52:09,748 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d, hbase.cluster.distributed=false 2024-12-04T06:52:09,751 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43845-0x1017c3eaee70000, quorum=127.0.0.1:52340, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T06:52:09,753 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43845 2024-12-04T06:52:09,753 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43845 2024-12-04T06:52:09,754 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43845 2024-12-04T06:52:09,756 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43845 2024-12-04T06:52:09,756 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43845 2024-12-04T06:52:09,783 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/607fd5c6574c:0 server-side Connection retries=45 2024-12-04T06:52:09,783 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T06:52:09,783 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T06:52:09,784 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T06:52:09,784 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T06:52:09,784 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T06:52:09,784 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-04T06:52:09,785 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T06:52:09,785 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37507 2024-12-04T06:52:09,787 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37507 connecting to ZooKeeper ensemble=127.0.0.1:52340 2024-12-04T06:52:09,788 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:52:09,791 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:52:09,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:375070x0, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T06:52:09,813 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:375070x0, quorum=127.0.0.1:52340, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T06:52:09,814 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-04T06:52:09,825 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37507-0x1017c3eaee70001 connected 2024-12-04T06:52:09,827 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-04T06:52:09,828 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37507-0x1017c3eaee70001, quorum=127.0.0.1:52340, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-04T06:52:09,829 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37507-0x1017c3eaee70001, quorum=127.0.0.1:52340, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T06:52:09,833 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37507 2024-12-04T06:52:09,835 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37507 2024-12-04T06:52:09,836 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37507 2024-12-04T06:52:09,837 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37507 2024-12-04T06:52:09,837 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37507 2024-12-04T06:52:09,851 
DEBUG [M:0;607fd5c6574c:43845 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;607fd5c6574c:43845 2024-12-04T06:52:09,851 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/607fd5c6574c,43845,1733295129707 2024-12-04T06:52:09,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37507-0x1017c3eaee70001, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T06:52:09,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43845-0x1017c3eaee70000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T06:52:09,854 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43845-0x1017c3eaee70000, quorum=127.0.0.1:52340, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/607fd5c6574c,43845,1733295129707 2024-12-04T06:52:09,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37507-0x1017c3eaee70001, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-04T06:52:09,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37507-0x1017c3eaee70001, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:09,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43845-0x1017c3eaee70000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:09,857 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43845-0x1017c3eaee70000, quorum=127.0.0.1:52340, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-04T06:52:09,858 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/607fd5c6574c,43845,1733295129707 from backup master directory 2024-12-04T06:52:09,861 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37507-0x1017c3eaee70001, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T06:52:09,862 WARN [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
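The ZKWatcher/ZKUtil lines above record watchers being set on znodes such as /hbase/master and /hbase/running (some of which do not exist yet) and the NodeCreated/NodeChildrenChanged events that arrive from the ensemble at 127.0.0.1:52340. A short sketch of the underlying ZooKeeper pattern, using the plain Apache ZooKeeper client rather than HBase's ZKUtil wrapper, purely as an illustration:

    import org.apache.zookeeper.ZooKeeper;

    public class MasterZNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        // Connect string and session timeout taken from the log (ensemble 127.0.0.1:52340).
        ZooKeeper zk = new ZooKeeper("127.0.0.1:52340", 30_000, event -> {
          // Fired for connection state changes and watched-znode events, analogous to the
          // "Received ZooKeeper Event, type=NodeCreated, ... path=/hbase/master" lines above.
          System.out.println("event: " + event.getType() + " path=" + event.getPath());
        });
        // exists() can register a watch even when the znode is absent, which is what
        // "Set watcher on znode that does not yet exist, /hbase/master" refers to.
        zk.exists("/hbase/master", true);
        Thread.sleep(60_000);   // keep the session alive long enough to observe events
        zk.close();
      }
    }
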
2024-12-04T06:52:09,862 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=607fd5c6574c,43845,1733295129707 2024-12-04T06:52:09,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43845-0x1017c3eaee70000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/607fd5c6574c,43845,1733295129707 2024-12-04T06:52:09,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43845-0x1017c3eaee70000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T06:52:09,868 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/hbase.id] with ID: 08bb85e6-6346-46de-a64d-16e925969e80 2024-12-04T06:52:09,868 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/.tmp/hbase.id 2024-12-04T06:52:09,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741826_1002 (size=42) 2024-12-04T06:52:09,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45527 is added to blk_1073741826_1002 (size=42) 2024-12-04T06:52:09,885 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/.tmp/hbase.id]:[hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/hbase.id] 2024-12-04T06:52:09,903 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:52:09,903 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-04T06:52:09,905 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
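The FSUtils lines above create the cluster ID file by first writing it to a temporary location (.tmp/hbase.id) and then moving it into place. A minimal sketch of that write-temp-then-rename pattern with the generic Hadoop FileSystem API; the paths and the structure are illustrative, not the exact FSUtils implementation:

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdFileSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:42649");   // namenode port from the log
        FileSystem fs = FileSystem.get(conf);

        Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id");  // illustrative paths
        Path dst = new Path("/user/jenkins/test-data/hbase.id");

        // 1. Write the ID to a temporary file first, so readers never see a partial file.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("08bb85e6-6346-46de-a64d-16e925969e80".getBytes(StandardCharsets.UTF_8));
        }
        // 2. Move it to its final location, as in the "Move the temporary cluster ID file
        //    to its target location" log line above.
        if (!fs.rename(tmp, dst)) {
          throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
        }
      }
    }
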
2024-12-04T06:52:09,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43845-0x1017c3eaee70000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:09,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37507-0x1017c3eaee70001, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:09,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741827_1003 (size=196) 2024-12-04T06:52:09,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45527 is added to blk_1073741827_1003 (size=196) 2024-12-04T06:52:09,924 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T06:52:09,925 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-04T06:52:09,925 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T06:52:09,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741828_1004 (size=1189) 2024-12-04T06:52:09,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45527 is added to blk_1073741828_1004 (size=1189) 2024-12-04T06:52:09,944 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/MasterData/data/master/store 2024-12-04T06:52:09,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45527 is added to blk_1073741829_1005 (size=34) 2024-12-04T06:52:09,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741829_1005 (size=34) 2024-12-04T06:52:09,954 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:52:09,954 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-04T06:52:09,954 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:52:09,954 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:52:09,955 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-04T06:52:09,955 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:52:09,955 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
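The descriptor printed above defines the master local region's table 'master:store' with the families info, proc, rs and state and their per-family settings (VERSIONS, IN_MEMORY, BLOCKSIZE, DATA_BLOCK_ENCODING). A hedged sketch of building a comparable descriptor with the public HBase client builders; the attribute values are copied from the log, but this is an illustration rather than the code MasterRegion itself runs:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static void main(String[] args) {
        // 'info' family as logged: VERSIONS=3, IN_MEMORY=true, 8 KB blocks, ROW_INDEX_V1 encoding.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBlocksize(8192)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .build())
            // 'proc', 'rs' and 'state' keep the defaults shown in the log (VERSIONS=1, 64 KB blocks).
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
            .build();
        System.out.println(td);
      }
    }
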
2024-12-04T06:52:09,955 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733295129954Disabling compacts and flushes for region at 1733295129954Disabling writes for close at 1733295129955 (+1 ms)Writing region close event to WAL at 1733295129955Closed at 1733295129955 2024-12-04T06:52:09,957 WARN [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/MasterData/data/master/store/.initializing 2024-12-04T06:52:09,957 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/MasterData/WALs/607fd5c6574c,43845,1733295129707 2024-12-04T06:52:09,961 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=607fd5c6574c%2C43845%2C1733295129707, suffix=, logDir=hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/MasterData/WALs/607fd5c6574c,43845,1733295129707, archiveDir=hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/MasterData/oldWALs, maxLogs=10 2024-12-04T06:52:09,961 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C43845%2C1733295129707.1733295129961 2024-12-04T06:52:09,967 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/MasterData/WALs/607fd5c6574c,43845,1733295129707/607fd5c6574c%2C43845%2C1733295129707.1733295129961 2024-12-04T06:52:09,968 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42673:42673),(127.0.0.1/127.0.0.1:43233:43233)] 2024-12-04T06:52:09,980 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-04T06:52:09,980 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:52:09,981 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:09,981 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:09,983 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:09,985 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-04T06:52:09,985 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:09,992 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:52:09,993 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:09,995 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-04T06:52:09,995 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:09,996 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T06:52:09,996 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:09,999 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-04T06:52:09,999 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:10,000 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T06:52:10,000 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:10,002 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-04T06:52:10,002 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:10,003 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T06:52:10,003 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:10,004 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:10,005 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:10,007 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:10,007 DEBUG [master/607fd5c6574c:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:10,008 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-04T06:52:10,010 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:10,013 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T06:52:10,014 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=800757, jitterRate=0.018215954303741455}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-04T06:52:10,015 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733295129981Initializing all the Stores at 1733295129983 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295129983Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295129983Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295129983Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295129983Cleaning up temporary data from old regions at 1733295130007 (+24 ms)Region opened successfully at 1733295130015 (+8 ms) 2024-12-04T06:52:10,016 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-04T06:52:10,020 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ffc4140, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=607fd5c6574c/172.17.0.2:0 2024-12-04T06:52:10,022 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-04T06:52:10,022 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-04T06:52:10,022 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-04T06:52:10,022 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-04T06:52:10,023 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-04T06:52:10,024 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-04T06:52:10,024 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-04T06:52:10,031 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-04T06:52:10,032 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43845-0x1017c3eaee70000, quorum=127.0.0.1:52340, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-04T06:52:10,034 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-04T06:52:10,035 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-04T06:52:10,036 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43845-0x1017c3eaee70000, quorum=127.0.0.1:52340, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-04T06:52:10,038 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-04T06:52:10,038 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-04T06:52:10,041 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43845-0x1017c3eaee70000, quorum=127.0.0.1:52340, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-04T06:52:10,044 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-04T06:52:10,046 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43845-0x1017c3eaee70000, quorum=127.0.0.1:52340, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-04T06:52:10,048 DEBUG 
[master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-04T06:52:10,051 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43845-0x1017c3eaee70000, quorum=127.0.0.1:52340, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-04T06:52:10,052 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-04T06:52:10,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37507-0x1017c3eaee70001, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T06:52:10,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37507-0x1017c3eaee70001, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:10,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43845-0x1017c3eaee70000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T06:52:10,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43845-0x1017c3eaee70000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:10,057 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=607fd5c6574c,43845,1733295129707, sessionid=0x1017c3eaee70000, setting cluster-up flag (Was=false) 2024-12-04T06:52:10,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37507-0x1017c3eaee70001, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:10,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43845-0x1017c3eaee70000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:10,070 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-04T06:52:10,077 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=607fd5c6574c,43845,1733295129707 2024-12-04T06:52:10,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37507-0x1017c3eaee70001, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:10,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43845-0x1017c3eaee70000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:10,095 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-04T06:52:10,096 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=607fd5c6574c,43845,1733295129707 2024-12-04T06:52:10,098 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-04T06:52:10,100 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-04T06:52:10,101 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-04T06:52:10,101 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-04T06:52:10,101 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 607fd5c6574c,43845,1733295129707 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-04T06:52:10,103 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/607fd5c6574c:0, corePoolSize=5, maxPoolSize=5 2024-12-04T06:52:10,103 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/607fd5c6574c:0, corePoolSize=5, maxPoolSize=5 2024-12-04T06:52:10,103 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/607fd5c6574c:0, corePoolSize=5, maxPoolSize=5 2024-12-04T06:52:10,103 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/607fd5c6574c:0, corePoolSize=5, maxPoolSize=5 2024-12-04T06:52:10,103 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/607fd5c6574c:0, corePoolSize=10, maxPoolSize=10 2024-12-04T06:52:10,103 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:10,103 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/607fd5c6574c:0, corePoolSize=2, maxPoolSize=2 2024-12-04T06:52:10,103 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/607fd5c6574c:0, corePoolSize=1, 
maxPoolSize=1 2024-12-04T06:52:10,107 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T06:52:10,107 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-04T06:52:10,108 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733295160108 2024-12-04T06:52:10,109 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-04T06:52:10,109 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-04T06:52:10,109 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-04T06:52:10,109 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-04T06:52:10,109 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-04T06:52:10,109 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-04T06:52:10,109 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:10,109 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-04T06:52:10,109 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-04T06:52:10,112 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-04T06:52:10,113 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-04T06:52:10,113 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-04T06:52:10,113 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-04T06:52:10,113 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-04T06:52:10,113 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.large.0-1733295130113,5,FailOnTimeoutGroup] 2024-12-04T06:52:10,116 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.small.0-1733295130113,5,FailOnTimeoutGroup] 2024-12-04T06:52:10,116 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:10,116 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 
2024-12-04T06:52:10,116 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:10,116 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:10,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45527 is added to blk_1073741831_1007 (size=1321) 2024-12-04T06:52:10,126 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-04T06:52:10,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741831_1007 (size=1321) 2024-12-04T06:52:10,128 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d 2024-12-04T06:52:10,142 INFO [RS:0;607fd5c6574c:37507 {}] regionserver.HRegionServer(746): ClusterId : 08bb85e6-6346-46de-a64d-16e925969e80 2024-12-04T06:52:10,142 DEBUG [RS:0;607fd5c6574c:37507 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-04T06:52:10,145 DEBUG [RS:0;607fd5c6574c:37507 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-04T06:52:10,145 DEBUG [RS:0;607fd5c6574c:37507 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-04T06:52:10,147 DEBUG [RS:0;607fd5c6574c:37507 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-04T06:52:10,148 DEBUG 
[RS:0;607fd5c6574c:37507 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16afaf9b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=607fd5c6574c/172.17.0.2:0 2024-12-04T06:52:10,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45527 is added to blk_1073741832_1008 (size=32) 2024-12-04T06:52:10,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741832_1008 (size=32) 2024-12-04T06:52:10,152 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:52:10,154 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T06:52:10,155 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T06:52:10,155 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:10,156 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:52:10,156 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-04T06:52:10,158 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-04T06:52:10,158 
DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:10,158 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:52:10,158 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T06:52:10,160 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T06:52:10,160 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:10,161 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:52:10,161 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T06:52:10,162 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-04T06:52:10,162 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:10,163 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:52:10,163 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-04T06:52:10,164 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/data/hbase/meta/1588230740 2024-12-04T06:52:10,164 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/data/hbase/meta/1588230740 2024-12-04T06:52:10,166 DEBUG [RS:0;607fd5c6574c:37507 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;607fd5c6574c:37507 2024-12-04T06:52:10,166 INFO [RS:0;607fd5c6574c:37507 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-04T06:52:10,166 INFO [RS:0;607fd5c6574c:37507 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-04T06:52:10,166 DEBUG [RS:0;607fd5c6574c:37507 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-04T06:52:10,166 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-04T06:52:10,166 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-04T06:52:10,167 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-04T06:52:10,167 INFO [RS:0;607fd5c6574c:37507 {}] regionserver.HRegionServer(2659): reportForDuty to master=607fd5c6574c,43845,1733295129707 with port=37507, startcode=1733295129783 2024-12-04T06:52:10,168 DEBUG [RS:0;607fd5c6574c:37507 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-04T06:52:10,169 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-04T06:52:10,171 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40669, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-04T06:52:10,172 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T06:52:10,172 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43845 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 607fd5c6574c,37507,1733295129783 2024-12-04T06:52:10,172 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43845 {}] master.ServerManager(517): Registering regionserver=607fd5c6574c,37507,1733295129783 2024-12-04T06:52:10,172 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=853438, jitterRate=0.08520331978797913}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T06:52:10,174 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733295130152Initializing all the Stores at 1733295130153 (+1 ms)Instantiating store for column family {NAME => 'info', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295130153Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295130153Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295130153Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295130153Cleaning up temporary data from old regions at 1733295130166 (+13 ms)Region opened successfully at 1733295130174 (+8 ms) 2024-12-04T06:52:10,174 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-04T06:52:10,174 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-04T06:52:10,174 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-04T06:52:10,174 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-04T06:52:10,174 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-04T06:52:10,175 DEBUG [RS:0;607fd5c6574c:37507 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d 2024-12-04T06:52:10,175 DEBUG [RS:0;607fd5c6574c:37507 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42649 2024-12-04T06:52:10,175 DEBUG [RS:0;607fd5c6574c:37507 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-04T06:52:10,175 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-04T06:52:10,175 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733295130174Disabling compacts and flushes for region at 1733295130174Disabling writes for close at 1733295130174Writing region close event to WAL at 1733295130175 (+1 ms)Closed at 1733295130175 2024-12-04T06:52:10,177 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T06:52:10,177 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-04T06:52:10,177 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-04T06:52:10,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43845-0x1017c3eaee70000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T06:52:10,179 DEBUG [RS:0;607fd5c6574c:37507 {}] zookeeper.ZKUtil(111): regionserver:37507-0x1017c3eaee70001, quorum=127.0.0.1:52340, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/607fd5c6574c,37507,1733295129783 2024-12-04T06:52:10,179 WARN [RS:0;607fd5c6574c:37507 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-04T06:52:10,179 INFO [RS:0;607fd5c6574c:37507 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T06:52:10,179 DEBUG [RS:0;607fd5c6574c:37507 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/WALs/607fd5c6574c,37507,1733295129783 2024-12-04T06:52:10,179 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-04T06:52:10,179 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [607fd5c6574c,37507,1733295129783] 2024-12-04T06:52:10,185 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-04T06:52:10,188 INFO [RS:0;607fd5c6574c:37507 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-04T06:52:10,195 INFO [RS:0;607fd5c6574c:37507 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-04T06:52:10,196 INFO [RS:0;607fd5c6574c:37507 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-04T06:52:10,196 INFO [RS:0;607fd5c6574c:37507 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:10,196 INFO [RS:0;607fd5c6574c:37507 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-04T06:52:10,197 INFO [RS:0;607fd5c6574c:37507 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-04T06:52:10,198 INFO [RS:0;607fd5c6574c:37507 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-04T06:52:10,198 DEBUG [RS:0;607fd5c6574c:37507 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:10,198 DEBUG [RS:0;607fd5c6574c:37507 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:10,198 DEBUG [RS:0;607fd5c6574c:37507 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:10,198 DEBUG [RS:0;607fd5c6574c:37507 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:10,198 DEBUG [RS:0;607fd5c6574c:37507 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:10,198 DEBUG [RS:0;607fd5c6574c:37507 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/607fd5c6574c:0, corePoolSize=2, maxPoolSize=2 2024-12-04T06:52:10,198 DEBUG [RS:0;607fd5c6574c:37507 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:10,199 DEBUG [RS:0;607fd5c6574c:37507 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:10,199 DEBUG [RS:0;607fd5c6574c:37507 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:10,199 DEBUG [RS:0;607fd5c6574c:37507 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:10,199 DEBUG [RS:0;607fd5c6574c:37507 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:10,199 DEBUG [RS:0;607fd5c6574c:37507 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:10,199 DEBUG [RS:0;607fd5c6574c:37507 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/607fd5c6574c:0, corePoolSize=3, maxPoolSize=3 2024-12-04T06:52:10,199 DEBUG [RS:0;607fd5c6574c:37507 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0, corePoolSize=3, maxPoolSize=3 2024-12-04T06:52:10,201 INFO [RS:0;607fd5c6574c:37507 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:10,201 INFO [RS:0;607fd5c6574c:37507 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:10,202 INFO [RS:0;607fd5c6574c:37507 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:10,202 INFO [RS:0;607fd5c6574c:37507 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-04T06:52:10,202 INFO [RS:0;607fd5c6574c:37507 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:10,202 INFO [RS:0;607fd5c6574c:37507 {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,37507,1733295129783-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T06:52:10,227 INFO [RS:0;607fd5c6574c:37507 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-04T06:52:10,227 INFO [RS:0;607fd5c6574c:37507 {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,37507,1733295129783-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:10,227 INFO [RS:0;607fd5c6574c:37507 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:10,228 INFO [RS:0;607fd5c6574c:37507 {}] regionserver.Replication(171): 607fd5c6574c,37507,1733295129783 started 2024-12-04T06:52:10,252 INFO [RS:0;607fd5c6574c:37507 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:10,252 INFO [RS:0;607fd5c6574c:37507 {}] regionserver.HRegionServer(1482): Serving as 607fd5c6574c,37507,1733295129783, RpcServer on 607fd5c6574c/172.17.0.2:37507, sessionid=0x1017c3eaee70001 2024-12-04T06:52:10,253 DEBUG [RS:0;607fd5c6574c:37507 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-04T06:52:10,253 DEBUG [RS:0;607fd5c6574c:37507 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 607fd5c6574c,37507,1733295129783 2024-12-04T06:52:10,253 DEBUG [RS:0;607fd5c6574c:37507 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '607fd5c6574c,37507,1733295129783' 2024-12-04T06:52:10,253 DEBUG [RS:0;607fd5c6574c:37507 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-04T06:52:10,254 DEBUG [RS:0;607fd5c6574c:37507 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-04T06:52:10,254 DEBUG [RS:0;607fd5c6574c:37507 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-04T06:52:10,254 DEBUG [RS:0;607fd5c6574c:37507 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-04T06:52:10,254 DEBUG [RS:0;607fd5c6574c:37507 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 607fd5c6574c,37507,1733295129783 2024-12-04T06:52:10,254 DEBUG [RS:0;607fd5c6574c:37507 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '607fd5c6574c,37507,1733295129783' 2024-12-04T06:52:10,254 DEBUG [RS:0;607fd5c6574c:37507 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-04T06:52:10,255 DEBUG [RS:0;607fd5c6574c:37507 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-04T06:52:10,256 DEBUG [RS:0;607fd5c6574c:37507 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-04T06:52:10,256 INFO [RS:0;607fd5c6574c:37507 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-04T06:52:10,256 INFO [RS:0;607fd5c6574c:37507 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-12-04T06:52:10,336 WARN [607fd5c6574c:43845 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-04T06:52:10,358 INFO [RS:0;607fd5c6574c:37507 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=607fd5c6574c%2C37507%2C1733295129783, suffix=, logDir=hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/WALs/607fd5c6574c,37507,1733295129783, archiveDir=hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/oldWALs, maxLogs=32 2024-12-04T06:52:10,361 INFO [RS:0;607fd5c6574c:37507 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C37507%2C1733295129783.1733295130360 2024-12-04T06:52:10,367 INFO [RS:0;607fd5c6574c:37507 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/WALs/607fd5c6574c,37507,1733295129783/607fd5c6574c%2C37507%2C1733295129783.1733295130360 2024-12-04T06:52:10,369 DEBUG [RS:0;607fd5c6574c:37507 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43233:43233),(127.0.0.1/127.0.0.1:42673:42673)] 2024-12-04T06:52:10,586 DEBUG [607fd5c6574c:43845 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-04T06:52:10,587 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=607fd5c6574c,37507,1733295129783 2024-12-04T06:52:10,589 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 607fd5c6574c,37507,1733295129783, state=OPENING 2024-12-04T06:52:10,591 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-04T06:52:10,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43845-0x1017c3eaee70000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:10,594 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T06:52:10,594 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-04T06:52:10,594 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=607fd5c6574c,37507,1733295129783}] 2024-12-04T06:52:10,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37507-0x1017c3eaee70001, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:10,596 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T06:52:10,749 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-04T06:52:10,752 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42949, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-04T06:52:10,758 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-04T06:52:10,758 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T06:52:10,761 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=607fd5c6574c%2C37507%2C1733295129783.meta, suffix=.meta, logDir=hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/WALs/607fd5c6574c,37507,1733295129783, archiveDir=hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/oldWALs, maxLogs=32 2024-12-04T06:52:10,763 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C37507%2C1733295129783.meta.1733295130763.meta 2024-12-04T06:52:10,771 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/WALs/607fd5c6574c,37507,1733295129783/607fd5c6574c%2C37507%2C1733295129783.meta.1733295130763.meta 2024-12-04T06:52:10,774 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42673:42673),(127.0.0.1/127.0.0.1:43233:43233)] 2024-12-04T06:52:10,776 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-04T06:52:10,776 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-04T06:52:10,777 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-04T06:52:10,777 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-04T06:52:10,777 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-04T06:52:10,777 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:52:10,777 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-04T06:52:10,777 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-04T06:52:10,781 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T06:52:10,782 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T06:52:10,783 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:10,783 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:52:10,784 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-04T06:52:10,785 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-04T06:52:10,785 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:10,786 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:52:10,786 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T06:52:10,787 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T06:52:10,787 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:10,788 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:52:10,788 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T06:52:10,789 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-04T06:52:10,789 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:10,790 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-04T06:52:10,790 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-04T06:52:10,793 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/data/hbase/meta/1588230740 2024-12-04T06:52:10,794 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/data/hbase/meta/1588230740 2024-12-04T06:52:10,796 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-04T06:52:10,796 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-04T06:52:10,797 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-04T06:52:10,799 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-04T06:52:10,800 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=853529, jitterRate=0.08531834185123444}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T06:52:10,800 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-04T06:52:10,802 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733295130778Writing region info on filesystem at 1733295130778Initializing all the Stores at 1733295130779 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295130779Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295130780 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295130781 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295130781Cleaning up temporary data from old regions at 1733295130796 (+15 ms)Running coprocessor post-open hooks at 1733295130800 (+4 ms)Region opened successfully at 1733295130802 (+2 ms) 2024-12-04T06:52:10,803 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733295130749 2024-12-04T06:52:10,807 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-04T06:52:10,808 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-04T06:52:10,809 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=607fd5c6574c,37507,1733295129783 2024-12-04T06:52:10,811 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 607fd5c6574c,37507,1733295129783, state=OPEN 2024-12-04T06:52:10,818 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43845-0x1017c3eaee70000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T06:52:10,818 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37507-0x1017c3eaee70001, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T06:52:10,818 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=607fd5c6574c,37507,1733295129783 2024-12-04T06:52:10,818 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T06:52:10,818 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T06:52:10,822 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-04T06:52:10,822 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=607fd5c6574c,37507,1733295129783 in 224 msec 2024-12-04T06:52:10,827 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-04T06:52:10,827 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 645 msec 2024-12-04T06:52:10,828 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T06:52:10,829 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-04T06:52:10,831 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-04T06:52:10,831 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=607fd5c6574c,37507,1733295129783, seqNum=-1] 2024-12-04T06:52:10,831 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T06:52:10,834 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58909, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T06:52:10,844 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 742 msec 2024-12-04T06:52:10,844 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733295130844, completionTime=-1 2024-12-04T06:52:10,844 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-04T06:52:10,844 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-04T06:52:10,847 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-04T06:52:10,847 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733295190847 2024-12-04T06:52:10,847 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733295250847 2024-12-04T06:52:10,847 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-04T06:52:10,847 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,43845,1733295129707-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:10,848 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,43845,1733295129707-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:10,848 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,43845,1733295129707-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:10,848 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-607fd5c6574c:43845, period=300000, unit=MILLISECONDS is enabled. 
2024-12-04T06:52:10,848 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:10,848 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:10,850 DEBUG [master/607fd5c6574c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-04T06:52:10,854 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.992sec 2024-12-04T06:52:10,854 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-04T06:52:10,854 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-04T06:52:10,854 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-04T06:52:10,854 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-04T06:52:10,854 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-04T06:52:10,854 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,43845,1733295129707-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T06:52:10,854 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,43845,1733295129707-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-04T06:52:10,857 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-04T06:52:10,857 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-04T06:52:10,857 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,43845,1733295129707-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-04T06:52:10,942 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ab35378, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T06:52:10,942 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 607fd5c6574c,43845,-1 for getting cluster id 2024-12-04T06:52:10,943 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-04T06:52:10,945 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '08bb85e6-6346-46de-a64d-16e925969e80' 2024-12-04T06:52:10,946 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-04T06:52:10,946 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "08bb85e6-6346-46de-a64d-16e925969e80" 2024-12-04T06:52:10,947 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71fba55e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T06:52:10,947 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [607fd5c6574c,43845,-1] 2024-12-04T06:52:10,948 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-04T06:52:10,948 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:52:10,950 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58028, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-04T06:52:10,952 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2fa3cb60, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T06:52:10,952 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-04T06:52:10,954 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=607fd5c6574c,37507,1733295129783, seqNum=-1] 2024-12-04T06:52:10,954 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T06:52:10,957 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47106, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T06:52:10,959 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=607fd5c6574c,43845,1733295129707 2024-12-04T06:52:10,960 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:52:10,964 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-04T06:52:10,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-04T06:52:10,964 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-04T06:52:10,964 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T06:52:10,964 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:52:10,964 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:52:10,964 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-04T06:52:10,965 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-04T06:52:10,965 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=972283204, stopped=false 2024-12-04T06:52:10,965 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=607fd5c6574c,43845,1733295129707 2024-12-04T06:52:10,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37507-0x1017c3eaee70001, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T06:52:10,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43845-0x1017c3eaee70000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T06:52:10,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37507-0x1017c3eaee70001, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:10,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43845-0x1017c3eaee70000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:10,967 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-04T06:52:10,967 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-04T06:52:10,967 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37507-0x1017c3eaee70001, quorum=127.0.0.1:52340, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T06:52:10,968 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T06:52:10,968 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:52:10,968 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '607fd5c6574c,37507,1733295129783' ***** 2024-12-04T06:52:10,968 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43845-0x1017c3eaee70000, quorum=127.0.0.1:52340, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T06:52:10,968 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-04T06:52:10,969 INFO [RS:0;607fd5c6574c:37507 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-04T06:52:10,969 INFO [RS:0;607fd5c6574c:37507 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-04T06:52:10,969 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-04T06:52:10,969 INFO [RS:0;607fd5c6574c:37507 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-04T06:52:10,969 INFO [RS:0;607fd5c6574c:37507 {}] regionserver.HRegionServer(959): stopping server 607fd5c6574c,37507,1733295129783 2024-12-04T06:52:10,969 INFO [RS:0;607fd5c6574c:37507 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T06:52:10,969 INFO [RS:0;607fd5c6574c:37507 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;607fd5c6574c:37507. 2024-12-04T06:52:10,969 DEBUG [RS:0;607fd5c6574c:37507 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T06:52:10,969 DEBUG [RS:0;607fd5c6574c:37507 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:52:10,970 INFO [RS:0;607fd5c6574c:37507 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-12-04T06:52:10,970 INFO [RS:0;607fd5c6574c:37507 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-04T06:52:10,970 INFO [RS:0;607fd5c6574c:37507 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-04T06:52:10,970 INFO [RS:0;607fd5c6574c:37507 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-04T06:52:10,970 INFO [RS:0;607fd5c6574c:37507 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-04T06:52:10,970 DEBUG [RS:0;607fd5c6574c:37507 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-04T06:52:10,971 DEBUG [RS:0;607fd5c6574c:37507 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-04T06:52:10,971 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-04T06:52:10,971 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-04T06:52:10,971 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-04T06:52:10,971 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-04T06:52:10,971 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-04T06:52:10,971 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-04T06:52:10,993 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/data/hbase/meta/1588230740/.tmp/ns/6944194883134bf4840713a6450b58ca is 43, key is default/ns:d/1733295130834/Put/seqid=0 2024-12-04T06:52:11,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45527 is added to blk_1073741835_1011 (size=5153) 2024-12-04T06:52:11,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741835_1011 (size=5153) 2024-12-04T06:52:11,001 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/data/hbase/meta/1588230740/.tmp/ns/6944194883134bf4840713a6450b58ca 2024-12-04T06:52:11,009 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/data/hbase/meta/1588230740/.tmp/ns/6944194883134bf4840713a6450b58ca as hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/data/hbase/meta/1588230740/ns/6944194883134bf4840713a6450b58ca 2024-12-04T06:52:11,021 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/data/hbase/meta/1588230740/ns/6944194883134bf4840713a6450b58ca, entries=2, sequenceid=6, filesize=5.0 K 2024-12-04T06:52:11,023 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 52ms, sequenceid=6, compaction requested=false 2024-12-04T06:52:11,023 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-04T06:52:11,030 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-04T06:52:11,030 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-04T06:52:11,031 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-04T06:52:11,031 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733295130971Running coprocessor pre-close hooks at 1733295130971Disabling compacts and flushes for region at 1733295130971Disabling writes for close at 1733295130971Obtaining lock to block concurrent updates at 1733295130971Preparing flush snapshotting stores in 1588230740 at 1733295130971Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733295130972 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733295130973 (+1 ms)Flushing 1588230740/ns: creating writer at 1733295130973Flushing 1588230740/ns: appending metadata at 1733295130992 (+19 ms)Flushing 1588230740/ns: closing flushed file at 1733295130993 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@529ac3df: reopening flushed file at 1733295131008 (+15 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 52ms, sequenceid=6, compaction requested=false at 1733295131023 (+15 ms)Writing region close event to WAL at 1733295131025 (+2 ms)Running coprocessor post-close hooks at 1733295131030 (+5 ms)Closed at 1733295131030 2024-12-04T06:52:11,031 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-04T06:52:11,171 INFO [RS:0;607fd5c6574c:37507 {}] regionserver.HRegionServer(976): stopping server 607fd5c6574c,37507,1733295129783; all regions closed. 
2024-12-04T06:52:11,171 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:11,172 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:11,172 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:11,172 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:11,172 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:11,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741834_1010 (size=1152) 2024-12-04T06:52:11,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45527 is added to blk_1073741834_1010 (size=1152) 2024-12-04T06:52:11,178 DEBUG [RS:0;607fd5c6574c:37507 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/oldWALs 2024-12-04T06:52:11,178 INFO [RS:0;607fd5c6574c:37507 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 607fd5c6574c%2C37507%2C1733295129783.meta:.meta(num 1733295130763) 2024-12-04T06:52:11,178 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:11,178 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:11,178 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:11,179 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:11,179 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:11,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45527 is added to blk_1073741833_1009 (size=93) 2024-12-04T06:52:11,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741833_1009 (size=93) 2024-12-04T06:52:11,183 DEBUG [RS:0;607fd5c6574c:37507 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/oldWALs 2024-12-04T06:52:11,183 INFO [RS:0;607fd5c6574c:37507 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 607fd5c6574c%2C37507%2C1733295129783:(num 1733295130360) 2024-12-04T06:52:11,183 DEBUG [RS:0;607fd5c6574c:37507 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:52:11,183 INFO [RS:0;607fd5c6574c:37507 {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T06:52:11,183 INFO [RS:0;607fd5c6574c:37507 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T06:52:11,184 INFO [RS:0;607fd5c6574c:37507 {}] hbase.ChoreService(370): Chore service for: regionserver/607fd5c6574c:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-04T06:52:11,184 INFO [RS:0;607fd5c6574c:37507 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T06:52:11,184 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-04T06:52:11,184 INFO [RS:0;607fd5c6574c:37507 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37507 2024-12-04T06:52:11,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37507-0x1017c3eaee70001, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/607fd5c6574c,37507,1733295129783 2024-12-04T06:52:11,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43845-0x1017c3eaee70000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T06:52:11,186 INFO [RS:0;607fd5c6574c:37507 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T06:52:11,188 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [607fd5c6574c,37507,1733295129783] 2024-12-04T06:52:11,190 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/607fd5c6574c,37507,1733295129783 already deleted, retry=false 2024-12-04T06:52:11,190 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 607fd5c6574c,37507,1733295129783 expired; onlineServers=0 2024-12-04T06:52:11,190 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '607fd5c6574c,43845,1733295129707' ***** 2024-12-04T06:52:11,190 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-04T06:52:11,190 INFO [M:0;607fd5c6574c:43845 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T06:52:11,191 INFO [M:0;607fd5c6574c:43845 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T06:52:11,191 DEBUG [M:0;607fd5c6574c:43845 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-04T06:52:11,191 DEBUG [M:0;607fd5c6574c:43845 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-04T06:52:11,191 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-04T06:52:11,191 DEBUG [master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.small.0-1733295130113 {}] cleaner.HFileCleaner(306): Exit Thread[master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.small.0-1733295130113,5,FailOnTimeoutGroup] 2024-12-04T06:52:11,191 DEBUG [master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.large.0-1733295130113 {}] cleaner.HFileCleaner(306): Exit Thread[master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.large.0-1733295130113,5,FailOnTimeoutGroup] 2024-12-04T06:52:11,191 INFO [M:0;607fd5c6574c:43845 {}] hbase.ChoreService(370): Chore service for: master/607fd5c6574c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-04T06:52:11,191 INFO [M:0;607fd5c6574c:43845 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T06:52:11,191 DEBUG [M:0;607fd5c6574c:43845 {}] master.HMaster(1795): Stopping service threads 2024-12-04T06:52:11,191 INFO [M:0;607fd5c6574c:43845 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-04T06:52:11,191 INFO [M:0;607fd5c6574c:43845 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-04T06:52:11,192 INFO [M:0;607fd5c6574c:43845 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-04T06:52:11,192 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-04T06:52:11,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43845-0x1017c3eaee70000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-04T06:52:11,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43845-0x1017c3eaee70000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:11,192 DEBUG [M:0;607fd5c6574c:43845 {}] zookeeper.ZKUtil(347): master:43845-0x1017c3eaee70000, quorum=127.0.0.1:52340, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-04T06:52:11,193 WARN [M:0;607fd5c6574c:43845 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-04T06:52:11,193 INFO [M:0;607fd5c6574c:43845 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/.lastflushedseqids 2024-12-04T06:52:11,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45527 is added to blk_1073741836_1012 (size=99) 2024-12-04T06:52:11,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741836_1012 (size=99) 2024-12-04T06:52:11,200 INFO [M:0;607fd5c6574c:43845 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-04T06:52:11,200 INFO [M:0;607fd5c6574c:43845 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-04T06:52:11,200 DEBUG [M:0;607fd5c6574c:43845 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-04T06:52:11,200 INFO [M:0;607fd5c6574c:43845 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:52:11,200 DEBUG [M:0;607fd5c6574c:43845 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:52:11,200 DEBUG [M:0;607fd5c6574c:43845 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-04T06:52:11,200 DEBUG [M:0;607fd5c6574c:43845 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:52:11,200 INFO [M:0;607fd5c6574c:43845 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-04T06:52:11,219 DEBUG [M:0;607fd5c6574c:43845 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/67a20cbe138c44ceb48c26c172f80493 is 82, key is hbase:meta,,1/info:regioninfo/1733295130809/Put/seqid=0 2024-12-04T06:52:11,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741837_1013 (size=5672) 2024-12-04T06:52:11,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45527 is added to blk_1073741837_1013 (size=5672) 2024-12-04T06:52:11,226 INFO [M:0;607fd5c6574c:43845 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/67a20cbe138c44ceb48c26c172f80493 2024-12-04T06:52:11,255 DEBUG [M:0;607fd5c6574c:43845 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6b2529d7607641829abfc42b96332bbb is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733295130842/Put/seqid=0 2024-12-04T06:52:11,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741838_1014 (size=5275) 2024-12-04T06:52:11,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45527 is added to blk_1073741838_1014 (size=5275) 2024-12-04T06:52:11,262 INFO [M:0;607fd5c6574c:43845 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6b2529d7607641829abfc42b96332bbb 2024-12-04T06:52:11,284 DEBUG [M:0;607fd5c6574c:43845 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/766eb368b3c0460292eb34878e6bac9b is 69, key is 607fd5c6574c,37507,1733295129783/rs:state/1733295130173/Put/seqid=0 2024-12-04T06:52:11,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37507-0x1017c3eaee70001, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=Closed, path=null 2024-12-04T06:52:11,288 INFO [RS:0;607fd5c6574c:37507 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-04T06:52:11,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37507-0x1017c3eaee70001, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T06:52:11,288 INFO [RS:0;607fd5c6574c:37507 {}] regionserver.HRegionServer(1031): Exiting; stopping=607fd5c6574c,37507,1733295129783; zookeeper connection closed. 2024-12-04T06:52:11,288 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2975e0f5 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2975e0f5 2024-12-04T06:52:11,289 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-04T06:52:11,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741839_1015 (size=5156) 2024-12-04T06:52:11,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45527 is added to blk_1073741839_1015 (size=5156) 2024-12-04T06:52:11,290 INFO [M:0;607fd5c6574c:43845 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/766eb368b3c0460292eb34878e6bac9b 2024-12-04T06:52:11,313 DEBUG [M:0;607fd5c6574c:43845 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/19c632b43a774f45849739d9ddcfceff is 52, key is load_balancer_on/state:d/1733295130962/Put/seqid=0 2024-12-04T06:52:11,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741840_1016 (size=5056) 2024-12-04T06:52:11,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45527 is added to blk_1073741840_1016 (size=5056) 2024-12-04T06:52:11,320 INFO [M:0;607fd5c6574c:43845 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/19c632b43a774f45849739d9ddcfceff 2024-12-04T06:52:11,330 DEBUG [M:0;607fd5c6574c:43845 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/67a20cbe138c44ceb48c26c172f80493 as hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/67a20cbe138c44ceb48c26c172f80493 2024-12-04T06:52:11,338 INFO [M:0;607fd5c6574c:43845 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/67a20cbe138c44ceb48c26c172f80493, entries=8, sequenceid=29, filesize=5.5 K 2024-12-04T06:52:11,340 DEBUG [M:0;607fd5c6574c:43845 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6b2529d7607641829abfc42b96332bbb as hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6b2529d7607641829abfc42b96332bbb 2024-12-04T06:52:11,348 INFO [M:0;607fd5c6574c:43845 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6b2529d7607641829abfc42b96332bbb, entries=3, sequenceid=29, filesize=5.2 K 2024-12-04T06:52:11,350 DEBUG [M:0;607fd5c6574c:43845 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/766eb368b3c0460292eb34878e6bac9b as hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/766eb368b3c0460292eb34878e6bac9b 2024-12-04T06:52:11,357 INFO [M:0;607fd5c6574c:43845 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/766eb368b3c0460292eb34878e6bac9b, entries=1, sequenceid=29, filesize=5.0 K 2024-12-04T06:52:11,358 DEBUG [M:0;607fd5c6574c:43845 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/19c632b43a774f45849739d9ddcfceff as hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/19c632b43a774f45849739d9ddcfceff 2024-12-04T06:52:11,365 INFO [M:0;607fd5c6574c:43845 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42649/user/jenkins/test-data/0b08ac56-f2d8-1390-a5f5-f140e1ba2c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/19c632b43a774f45849739d9ddcfceff, entries=1, sequenceid=29, filesize=4.9 K 2024-12-04T06:52:11,366 INFO [M:0;607fd5c6574c:43845 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 166ms, sequenceid=29, compaction requested=false 2024-12-04T06:52:11,368 INFO [M:0;607fd5c6574c:43845 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:52:11,368 DEBUG [M:0;607fd5c6574c:43845 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733295131200Disabling compacts and flushes for region at 1733295131200Disabling writes for close at 1733295131200Obtaining lock to block concurrent updates at 1733295131200Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733295131200Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733295131201 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733295131201Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733295131202 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733295131218 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733295131218Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733295131234 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733295131254 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733295131254Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733295131267 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733295131283 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733295131283Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733295131296 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733295131312 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733295131312Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2ffc628f: reopening flushed file at 1733295131328 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4bfe57f1: reopening flushed file at 1733295131339 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4ad41683: reopening flushed file at 1733295131348 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@423393b9: reopening flushed file at 1733295131357 (+9 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 166ms, sequenceid=29, compaction requested=false at 1733295131366 (+9 ms)Writing region close event to WAL at 1733295131368 (+2 ms)Closed at 1733295131368 2024-12-04T06:52:11,369 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:11,369 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:11,369 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:11,369 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:11,369 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:11,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45511 is added to blk_1073741830_1006 (size=10311) 2024-12-04T06:52:11,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45527 is added to blk_1073741830_1006 (size=10311) 2024-12-04T06:52:11,374 INFO [M:0;607fd5c6574c:43845 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-04T06:52:11,374 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-04T06:52:11,374 INFO [M:0;607fd5c6574c:43845 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43845 2024-12-04T06:52:11,374 INFO [M:0;607fd5c6574c:43845 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T06:52:11,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43845-0x1017c3eaee70000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T06:52:11,476 INFO [M:0;607fd5c6574c:43845 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-04T06:52:11,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43845-0x1017c3eaee70000, quorum=127.0.0.1:52340, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T06:52:11,479 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6838bf55{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:52:11,480 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@254e0164{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T06:52:11,480 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T06:52:11,480 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c1be80f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T06:52:11,480 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@274298f3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/hadoop.log.dir/,STOPPED} 2024-12-04T06:52:11,482 WARN [BP-678346079-172.17.0.2-1733295128832 heartbeating to localhost/127.0.0.1:42649 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T06:52:11,482 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-04T06:52:11,482 WARN [BP-678346079-172.17.0.2-1733295128832 heartbeating to localhost/127.0.0.1:42649 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-678346079-172.17.0.2-1733295128832 (Datanode Uuid a86fc730-5639-4f00-aae8-733dbef89a3a) service to localhost/127.0.0.1:42649 2024-12-04T06:52:11,482 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T06:52:11,483 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/cluster_d9efd569-6419-5db6-32ee-d01d56c44834/data/data3/current/BP-678346079-172.17.0.2-1733295128832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:52:11,483 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/cluster_d9efd569-6419-5db6-32ee-d01d56c44834/data/data4/current/BP-678346079-172.17.0.2-1733295128832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:52:11,484 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T06:52:11,489 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2152d149{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:52:11,490 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1d281952{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T06:52:11,490 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T06:52:11,490 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@eab7acc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T06:52:11,490 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@469dec96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/hadoop.log.dir/,STOPPED} 2024-12-04T06:52:11,492 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-04T06:52:11,492 WARN [BP-678346079-172.17.0.2-1733295128832 heartbeating to localhost/127.0.0.1:42649 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T06:52:11,492 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T06:52:11,492 WARN [BP-678346079-172.17.0.2-1733295128832 heartbeating to localhost/127.0.0.1:42649 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-678346079-172.17.0.2-1733295128832 (Datanode Uuid ee65e552-8c8e-4b1c-a90b-f7d2727f3f21) service to localhost/127.0.0.1:42649 2024-12-04T06:52:11,493 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/cluster_d9efd569-6419-5db6-32ee-d01d56c44834/data/data1/current/BP-678346079-172.17.0.2-1733295128832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:52:11,493 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/cluster_d9efd569-6419-5db6-32ee-d01d56c44834/data/data2/current/BP-678346079-172.17.0.2-1733295128832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:52:11,494 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T06:52:11,502 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@49a88a00{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-04T06:52:11,502 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4e4256d4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T06:52:11,503 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T06:52:11,503 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3197ca45{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T06:52:11,503 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f681677{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/hadoop.log.dir/,STOPPED} 2024-12-04T06:52:11,511 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-04T06:52:11,532 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-04T06:52:11,532 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-04T06:52:11,532 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/hadoop.log.dir so I do NOT create it in target/test-data/3746517b-7168-b890-1443-97f7d613219b 2024-12-04T06:52:11,532 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b9a0fbc-b186-9175-3f03-25e905b9f20a/hadoop.tmp.dir so I do NOT create it in target/test-data/3746517b-7168-b890-1443-97f7d613219b 2024-12-04T06:52:11,532 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75, deleteOnExit=true 2024-12-04T06:52:11,532 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-04T06:52:11,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/test.cache.data in system properties and HBase conf 2024-12-04T06:52:11,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/hadoop.tmp.dir in system properties and HBase conf 2024-12-04T06:52:11,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/hadoop.log.dir in system properties and HBase conf 2024-12-04T06:52:11,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-04T06:52:11,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-04T06:52:11,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-04T06:52:11,533 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-04T06:52:11,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-04T06:52:11,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-04T06:52:11,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-04T06:52:11,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T06:52:11,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-04T06:52:11,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-04T06:52:11,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T06:52:11,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T06:52:11,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-04T06:52:11,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/nfs.dump.dir in system properties and HBase conf 2024-12-04T06:52:11,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/java.io.tmpdir in system properties and HBase conf 2024-12-04T06:52:11,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T06:52:11,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-04T06:52:11,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-04T06:52:11,549 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-04T06:52:11,621 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T06:52:11,630 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T06:52:11,632 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T06:52:11,632 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T06:52:11,632 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T06:52:11,633 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T06:52:11,633 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@42b52d44{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/hadoop.log.dir/,AVAILABLE} 2024-12-04T06:52:11,634 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2305029e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T06:52:11,766 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@62b96b7c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/java.io.tmpdir/jetty-localhost-38627-hadoop-hdfs-3_4_1-tests_jar-_-any-5453932732881808476/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-04T06:52:11,767 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2fd186ec{HTTP/1.1, (http/1.1)}{localhost:38627} 2024-12-04T06:52:11,767 INFO [Time-limited test {}] server.Server(415): Started @104292ms 2024-12-04T06:52:11,789 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-04T06:52:11,881 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T06:52:11,886 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T06:52:11,889 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T06:52:11,889 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T06:52:11,889 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-04T06:52:11,890 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5a107105{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/hadoop.log.dir/,AVAILABLE} 2024-12-04T06:52:11,890 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@48743db4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T06:52:12,022 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@14d09ab9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/java.io.tmpdir/jetty-localhost-42021-hadoop-hdfs-3_4_1-tests_jar-_-any-14027832215633823360/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:52:12,022 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@ee6b493{HTTP/1.1, (http/1.1)}{localhost:42021} 2024-12-04T06:52:12,023 INFO [Time-limited test {}] server.Server(415): Started @104547ms 2024-12-04T06:52:12,024 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-04T06:52:12,061 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T06:52:12,066 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T06:52:12,067 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T06:52:12,067 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T06:52:12,067 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-04T06:52:12,068 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7eee535{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/hadoop.log.dir/,AVAILABLE} 2024-12-04T06:52:12,069 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ec1a06e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T06:52:12,142 WARN [Thread-657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data1/current/BP-1601631713-172.17.0.2-1733295131567/current, will proceed with Du for space computation calculation, 2024-12-04T06:52:12,142 WARN [Thread-658 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data2/current/BP-1601631713-172.17.0.2-1733295131567/current, will proceed with Du for space computation calculation, 2024-12-04T06:52:12,165 WARN [Thread-636 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-04T06:52:12,168 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x53236e5142ca13f0 with lease ID 0x3cd9640a93ef8921: Processing first storage report for DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c from datanode DatanodeRegistration(127.0.0.1:45609, datanodeUuid=546df34f-c7f7-4370-ba95-552dcf23bcb4, infoPort=38487, infoSecurePort=0, ipcPort=41113, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567) 2024-12-04T06:52:12,168 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x53236e5142ca13f0 with lease ID 0x3cd9640a93ef8921: from storage DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c node DatanodeRegistration(127.0.0.1:45609, datanodeUuid=546df34f-c7f7-4370-ba95-552dcf23bcb4, infoPort=38487, infoSecurePort=0, ipcPort=41113, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:52:12,168 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x53236e5142ca13f0 with lease ID 0x3cd9640a93ef8921: Processing first storage report for DS-26219d06-8954-4eb7-9b79-632fd87981b3 from datanode DatanodeRegistration(127.0.0.1:45609, datanodeUuid=546df34f-c7f7-4370-ba95-552dcf23bcb4, infoPort=38487, infoSecurePort=0, ipcPort=41113, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567) 2024-12-04T06:52:12,168 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x53236e5142ca13f0 with lease ID 0x3cd9640a93ef8921: from storage DS-26219d06-8954-4eb7-9b79-632fd87981b3 node DatanodeRegistration(127.0.0.1:45609, datanodeUuid=546df34f-c7f7-4370-ba95-552dcf23bcb4, infoPort=38487, infoSecurePort=0, ipcPort=41113, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:52:12,192 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7330fb3f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/java.io.tmpdir/jetty-localhost-33485-hadoop-hdfs-3_4_1-tests_jar-_-any-12870875323072893642/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:52:12,192 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@20aa2ea7{HTTP/1.1, (http/1.1)}{localhost:33485} 2024-12-04T06:52:12,192 INFO [Time-limited test {}] server.Server(415): Started @104717ms 2024-12-04T06:52:12,194 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
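[Editor's aside] The DirectoryScanner warning in this stretch notes that dfs.datanode.directoryscan.throttle.limit.ms.per.sec was set above 1000 ms/sec, so the DataNode fell back to -1 (throttling disabled). A minimal sketch of keeping it inside the valid range on a Hadoop Configuration follows; the property name is quoted from the log, the value 500 is purely illustrative.

import org.apache.hadoop.conf.Configuration;

public class DirScannerThrottleSketch {
  // Returns a conf whose scanner throttle stays within 0..1000 ms/sec,
  // so the DataNode does not silently reset it to -1 as warned above.
  public static Configuration withThrottle() {
    Configuration conf = new Configuration();
    conf.setInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", 500); // illustrative value
    return conf;
  }
}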
2024-12-04T06:52:12,202 INFO [regionserver/607fd5c6574c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T06:52:12,299 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data3/current/BP-1601631713-172.17.0.2-1733295131567/current, will proceed with Du for space computation calculation, 2024-12-04T06:52:12,299 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data4/current/BP-1601631713-172.17.0.2-1733295131567/current, will proceed with Du for space computation calculation, 2024-12-04T06:52:12,320 WARN [Thread-672 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-04T06:52:12,323 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x713ba5799e7c7032 with lease ID 0x3cd9640a93ef8922: Processing first storage report for DS-2f609111-e44f-4cdd-9e6d-8dc690e52386 from datanode DatanodeRegistration(127.0.0.1:33043, datanodeUuid=068c6fc1-5d76-4592-a144-8a94d67736fb, infoPort=34647, infoSecurePort=0, ipcPort=36811, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567) 2024-12-04T06:52:12,323 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x713ba5799e7c7032 with lease ID 0x3cd9640a93ef8922: from storage DS-2f609111-e44f-4cdd-9e6d-8dc690e52386 node DatanodeRegistration(127.0.0.1:33043, datanodeUuid=068c6fc1-5d76-4592-a144-8a94d67736fb, infoPort=34647, infoSecurePort=0, ipcPort=36811, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:52:12,323 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x713ba5799e7c7032 with lease ID 0x3cd9640a93ef8922: Processing first storage report for DS-a550b0e7-8b53-489a-a0f2-0e01b9e8ef92 from datanode DatanodeRegistration(127.0.0.1:33043, datanodeUuid=068c6fc1-5d76-4592-a144-8a94d67736fb, infoPort=34647, infoSecurePort=0, ipcPort=36811, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567) 2024-12-04T06:52:12,323 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x713ba5799e7c7032 with lease ID 0x3cd9640a93ef8922: from storage DS-a550b0e7-8b53-489a-a0f2-0e01b9e8ef92 node DatanodeRegistration(127.0.0.1:33043, datanodeUuid=068c6fc1-5d76-4592-a144-8a94d67736fb, infoPort=34647, infoSecurePort=0, ipcPort=36811, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:52:12,424 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b 2024-12-04T06:52:12,427 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, 
dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/zookeeper_0, clientPort=54010, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-04T06:52:12,429 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54010 2024-12-04T06:52:12,429 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:52:12,431 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:52:12,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33043 is added to blk_1073741825_1001 (size=7) 2024-12-04T06:52:12,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45609 is added to blk_1073741825_1001 (size=7) 2024-12-04T06:52:12,448 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12 with version=8 2024-12-04T06:52:12,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/hbase-staging 2024-12-04T06:52:12,451 INFO [Time-limited test {}] client.ConnectionUtils(128): master/607fd5c6574c:0 server-side Connection retries=45 2024-12-04T06:52:12,451 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T06:52:12,451 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T06:52:12,451 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T06:52:12,451 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T06:52:12,451 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T06:52:12,452 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, 
hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-04T06:52:12,452 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T06:52:12,453 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45843 2024-12-04T06:52:12,454 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45843 connecting to ZooKeeper ensemble=127.0.0.1:54010 2024-12-04T06:52:12,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:458430x0, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T06:52:12,466 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45843-0x1017c3eb99a0000 connected 2024-12-04T06:52:12,487 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:52:12,489 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:52:12,491 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45843-0x1017c3eb99a0000, quorum=127.0.0.1:54010, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T06:52:12,492 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12, hbase.cluster.distributed=false 2024-12-04T06:52:12,493 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45843-0x1017c3eb99a0000, quorum=127.0.0.1:54010, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T06:52:12,496 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45843 2024-12-04T06:52:12,496 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45843 2024-12-04T06:52:12,498 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45843 2024-12-04T06:52:12,503 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45843 2024-12-04T06:52:12,504 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45843 2024-12-04T06:52:12,531 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/607fd5c6574c:0 server-side Connection retries=45 2024-12-04T06:52:12,531 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T06:52:12,531 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T06:52:12,531 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 
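[Editor's aside] At this point the master has bound its Netty RPC server and registered against the ZooKeeper ensemble the log reports as 127.0.0.1:54010. A hedged sketch of how a client in the same test could point at that ensemble follows; the port comes from the log, while the hbase.zookeeper.quorum and hbase.zookeeper.property.clientPort keys are the usual HBase client settings and are an assumption here, not printed in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MiniClusterClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.setInt("hbase.zookeeper.property.clientPort", 54010); // port from the log
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      System.out.println("Connected to mini cluster: " + !connection.isClosed());
    }
  }
}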
2024-12-04T06:52:12,531 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T06:52:12,531 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T06:52:12,531 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-04T06:52:12,532 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T06:52:12,533 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33613 2024-12-04T06:52:12,535 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33613 connecting to ZooKeeper ensemble=127.0.0.1:54010 2024-12-04T06:52:12,536 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:52:12,539 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:52:12,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:336130x0, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T06:52:12,546 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33613-0x1017c3eb99a0001, quorum=127.0.0.1:54010, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T06:52:12,546 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33613-0x1017c3eb99a0001 connected 2024-12-04T06:52:12,547 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-04T06:52:12,547 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-04T06:52:12,548 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33613-0x1017c3eb99a0001, quorum=127.0.0.1:54010, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-04T06:52:12,549 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33613-0x1017c3eb99a0001, quorum=127.0.0.1:54010, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T06:52:12,552 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33613 2024-12-04T06:52:12,552 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33613 2024-12-04T06:52:12,552 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33613 2024-12-04T06:52:12,556 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33613 
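[Editor's aside] The region server above allocates "BlockCache size=880 MB, blockSize=64 KB"; that size is derived from the JVM heap and the configured block-cache fraction. The sketch below sets that fraction explicitly; "hfile.block.cache.size" is the standard HBase key for it, but both the key and the 0.4 value are assumptions here, since the log only prints the resulting cache size.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BlockCacheFractionSketch {
  // Fraction of the region server heap given to the on-heap block cache (illustrative).
  public static Configuration withBlockCacheFraction() {
    Configuration conf = HBaseConfiguration.create();
    conf.setFloat("hfile.block.cache.size", 0.4f);
    return conf;
  }
}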
2024-12-04T06:52:12,556 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33613 2024-12-04T06:52:12,570 DEBUG [M:0;607fd5c6574c:45843 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;607fd5c6574c:45843 2024-12-04T06:52:12,571 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/607fd5c6574c,45843,1733295132451 2024-12-04T06:52:12,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45843-0x1017c3eb99a0000, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T06:52:12,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33613-0x1017c3eb99a0001, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T06:52:12,574 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45843-0x1017c3eb99a0000, quorum=127.0.0.1:54010, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/607fd5c6574c,45843,1733295132451 2024-12-04T06:52:12,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45843-0x1017c3eb99a0000, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:12,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33613-0x1017c3eb99a0001, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-04T06:52:12,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33613-0x1017c3eb99a0001, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:12,576 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45843-0x1017c3eb99a0000, quorum=127.0.0.1:54010, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-04T06:52:12,576 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/607fd5c6574c,45843,1733295132451 from backup master directory 2024-12-04T06:52:12,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45843-0x1017c3eb99a0000, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/607fd5c6574c,45843,1733295132451 2024-12-04T06:52:12,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45843-0x1017c3eb99a0000, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T06:52:12,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33613-0x1017c3eb99a0001, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T06:52:12,578 WARN [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-04T06:52:12,578 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=607fd5c6574c,45843,1733295132451 2024-12-04T06:52:12,584 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/hbase.id] with ID: e6fd505c-fa26-47ad-839d-051ea7a94883 2024-12-04T06:52:12,584 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/.tmp/hbase.id 2024-12-04T06:52:12,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45609 is added to blk_1073741826_1002 (size=42) 2024-12-04T06:52:12,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33043 is added to blk_1073741826_1002 (size=42) 2024-12-04T06:52:12,594 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/.tmp/hbase.id]:[hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/hbase.id] 2024-12-04T06:52:12,608 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:52:12,609 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-04T06:52:12,610 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-12-04T06:52:12,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45843-0x1017c3eb99a0000, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:12,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33613-0x1017c3eb99a0001, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:12,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45609 is added to blk_1073741827_1003 (size=196) 2024-12-04T06:52:12,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33043 is added to blk_1073741827_1003 (size=196) 2024-12-04T06:52:12,631 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T06:52:12,632 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-04T06:52:12,632 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T06:52:12,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33043 is added to blk_1073741828_1004 (size=1189) 2024-12-04T06:52:12,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45609 is added to blk_1073741828_1004 (size=1189) 2024-12-04T06:52:12,645 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/data/master/store 2024-12-04T06:52:12,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45609 is added to blk_1073741829_1005 (size=34) 2024-12-04T06:52:12,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33043 is added to blk_1073741829_1005 (size=34) 2024-12-04T06:52:12,654 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:52:12,654 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-04T06:52:12,654 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:52:12,654 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:52:12,654 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-04T06:52:12,655 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:52:12,655 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
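[Editor's aside] The master:store descriptor printed above lists per-family attributes such as VERSIONS => '3', IN_MEMORY => 'true', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOOMFILTER => 'ROWCOL' and BLOCKSIZE => '8192' for the 'info' family. A sketch of building an equivalent family with the public HBase client builder API follows; the attribute values are taken from the log, while the builder calls themselves are the standard client API and are assumed here, since the log only shows the finished descriptor.

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class InfoFamilySketch {
  // Builds a column family with the same attributes the log prints for 'info'.
  public static ColumnFamilyDescriptor infoFamily() {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setBlocksize(8192)
        .build();
  }
}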
2024-12-04T06:52:12,655 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733295132654Disabling compacts and flushes for region at 1733295132654Disabling writes for close at 1733295132654Writing region close event to WAL at 1733295132655 (+1 ms)Closed at 1733295132655 2024-12-04T06:52:12,656 WARN [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/data/master/store/.initializing 2024-12-04T06:52:12,656 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/WALs/607fd5c6574c,45843,1733295132451 2024-12-04T06:52:12,659 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=607fd5c6574c%2C45843%2C1733295132451, suffix=, logDir=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/WALs/607fd5c6574c,45843,1733295132451, archiveDir=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/oldWALs, maxLogs=10 2024-12-04T06:52:12,659 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C45843%2C1733295132451.1733295132659 2024-12-04T06:52:12,665 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/WALs/607fd5c6574c,45843,1733295132451/607fd5c6574c%2C45843%2C1733295132451.1733295132659 2024-12-04T06:52:12,668 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38487:38487),(127.0.0.1/127.0.0.1:34647:34647)] 2024-12-04T06:52:12,670 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-04T06:52:12,670 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:52:12,670 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:12,670 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:12,672 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:12,673 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-04T06:52:12,673 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:12,674 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:52:12,674 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:12,675 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-04T06:52:12,675 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:12,676 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T06:52:12,676 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:12,677 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-04T06:52:12,677 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:12,678 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T06:52:12,678 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:12,679 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-04T06:52:12,679 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:12,680 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T06:52:12,680 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:12,681 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:12,681 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:12,683 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:12,683 DEBUG [master/607fd5c6574c:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:12,684 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-04T06:52:12,685 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:12,687 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T06:52:12,688 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=848176, jitterRate=0.07851281762123108}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-04T06:52:12,689 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733295132670Initializing all the Stores at 1733295132671 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295132671Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295132671Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295132671Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295132671Cleaning up temporary data from old regions at 1733295132683 (+12 ms)Region opened successfully at 1733295132689 (+6 ms) 2024-12-04T06:52:12,689 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-04T06:52:12,693 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77dcf3f7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=607fd5c6574c/172.17.0.2:0 2024-12-04T06:52:12,694 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-04T06:52:12,694 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-04T06:52:12,694 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-04T06:52:12,695 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-04T06:52:12,695 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-04T06:52:12,695 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-04T06:52:12,696 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-04T06:52:12,698 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-04T06:52:12,699 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45843-0x1017c3eb99a0000, quorum=127.0.0.1:54010, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-04T06:52:12,700 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-04T06:52:12,701 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-04T06:52:12,701 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45843-0x1017c3eb99a0000, quorum=127.0.0.1:54010, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-04T06:52:12,702 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-04T06:52:12,703 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-04T06:52:12,703 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45843-0x1017c3eb99a0000, quorum=127.0.0.1:54010, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-04T06:52:12,705 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-04T06:52:12,705 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45843-0x1017c3eb99a0000, quorum=127.0.0.1:54010, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-04T06:52:12,709 DEBUG 
[master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-04T06:52:12,710 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45843-0x1017c3eb99a0000, quorum=127.0.0.1:54010, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-04T06:52:12,711 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-04T06:52:12,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45843-0x1017c3eb99a0000, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T06:52:12,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45843-0x1017c3eb99a0000, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:12,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33613-0x1017c3eb99a0001, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T06:52:12,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33613-0x1017c3eb99a0001, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:12,714 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=607fd5c6574c,45843,1733295132451, sessionid=0x1017c3eb99a0000, setting cluster-up flag (Was=false) 2024-12-04T06:52:12,720 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45843-0x1017c3eb99a0000, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:12,720 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33613-0x1017c3eb99a0001, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:12,725 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-04T06:52:12,726 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=607fd5c6574c,45843,1733295132451 2024-12-04T06:52:12,731 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33613-0x1017c3eb99a0001, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:12,731 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45843-0x1017c3eb99a0000, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:12,737 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-04T06:52:12,738 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=607fd5c6574c,45843,1733295132451 2024-12-04T06:52:12,739 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-04T06:52:12,741 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-04T06:52:12,742 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-04T06:52:12,742 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-04T06:52:12,742 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 607fd5c6574c,45843,1733295132451 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-04T06:52:12,744 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/607fd5c6574c:0, corePoolSize=5, maxPoolSize=5 2024-12-04T06:52:12,744 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/607fd5c6574c:0, corePoolSize=5, maxPoolSize=5 2024-12-04T06:52:12,744 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/607fd5c6574c:0, corePoolSize=5, maxPoolSize=5 2024-12-04T06:52:12,744 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/607fd5c6574c:0, corePoolSize=5, maxPoolSize=5 2024-12-04T06:52:12,744 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/607fd5c6574c:0, corePoolSize=10, maxPoolSize=10 2024-12-04T06:52:12,744 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:12,744 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/607fd5c6574c:0, corePoolSize=2, maxPoolSize=2 2024-12-04T06:52:12,744 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/607fd5c6574c:0, corePoolSize=1, 
maxPoolSize=1 2024-12-04T06:52:12,745 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733295162745 2024-12-04T06:52:12,745 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-04T06:52:12,745 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-04T06:52:12,745 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-04T06:52:12,745 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-04T06:52:12,745 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-04T06:52:12,745 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-04T06:52:12,745 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:12,746 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-04T06:52:12,746 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-04T06:52:12,746 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T06:52:12,746 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-04T06:52:12,746 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-04T06:52:12,747 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-04T06:52:12,747 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-04T06:52:12,747 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.large.0-1733295132747,5,FailOnTimeoutGroup] 2024-12-04T06:52:12,748 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.small.0-1733295132747,5,FailOnTimeoutGroup] 2024-12-04T06:52:12,748 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:12,748 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-04T06:52:12,748 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:12,748 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:12,748 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:12,748 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-04T06:52:12,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33043 is added to blk_1073741831_1007 (size=1321) 2024-12-04T06:52:12,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45609 is added to blk_1073741831_1007 (size=1321) 2024-12-04T06:52:12,757 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-04T06:52:12,757 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12 2024-12-04T06:52:12,758 INFO [RS:0;607fd5c6574c:33613 {}] regionserver.HRegionServer(746): ClusterId : e6fd505c-fa26-47ad-839d-051ea7a94883 2024-12-04T06:52:12,759 DEBUG [RS:0;607fd5c6574c:33613 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-04T06:52:12,761 DEBUG [RS:0;607fd5c6574c:33613 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-04T06:52:12,761 DEBUG [RS:0;607fd5c6574c:33613 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-04T06:52:12,763 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-04T06:52:12,763 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-04T06:52:12,764 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-04T06:52:12,765 DEBUG [RS:0;607fd5c6574c:33613 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-04T06:52:12,765 DEBUG [RS:0;607fd5c6574c:33613 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44c4aeb6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=607fd5c6574c/172.17.0.2:0 2024-12-04T06:52:12,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45609 is added to blk_1073741832_1008 (size=32) 2024-12-04T06:52:12,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33043 is added to blk_1073741832_1008 (size=32) 2024-12-04T06:52:12,779 DEBUG [RS:0;607fd5c6574c:33613 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;607fd5c6574c:33613 2024-12-04T06:52:12,779 INFO [RS:0;607fd5c6574c:33613 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-04T06:52:12,779 
INFO [RS:0;607fd5c6574c:33613 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-04T06:52:12,779 DEBUG [RS:0;607fd5c6574c:33613 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-04T06:52:12,780 INFO [RS:0;607fd5c6574c:33613 {}] regionserver.HRegionServer(2659): reportForDuty to master=607fd5c6574c,45843,1733295132451 with port=33613, startcode=1733295132530 2024-12-04T06:52:12,780 DEBUG [RS:0;607fd5c6574c:33613 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-04T06:52:12,782 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40537, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-04T06:52:12,783 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45843 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 607fd5c6574c,33613,1733295132530 2024-12-04T06:52:12,783 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45843 {}] master.ServerManager(517): Registering regionserver=607fd5c6574c,33613,1733295132530 2024-12-04T06:52:12,785 DEBUG [RS:0;607fd5c6574c:33613 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12 2024-12-04T06:52:12,785 DEBUG [RS:0;607fd5c6574c:33613 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41097 2024-12-04T06:52:12,785 DEBUG [RS:0;607fd5c6574c:33613 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-04T06:52:12,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45843-0x1017c3eb99a0000, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T06:52:12,787 DEBUG [RS:0;607fd5c6574c:33613 {}] zookeeper.ZKUtil(111): regionserver:33613-0x1017c3eb99a0001, quorum=127.0.0.1:54010, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/607fd5c6574c,33613,1733295132530 2024-12-04T06:52:12,787 WARN [RS:0;607fd5c6574c:33613 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-04T06:52:12,787 INFO [RS:0;607fd5c6574c:33613 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T06:52:12,788 DEBUG [RS:0;607fd5c6574c:33613 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530 2024-12-04T06:52:12,788 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [607fd5c6574c,33613,1733295132530] 2024-12-04T06:52:12,791 INFO [RS:0;607fd5c6574c:33613 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-04T06:52:12,794 INFO [RS:0;607fd5c6574c:33613 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-04T06:52:12,794 INFO [RS:0;607fd5c6574c:33613 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-04T06:52:12,795 INFO [RS:0;607fd5c6574c:33613 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:12,795 INFO [RS:0;607fd5c6574c:33613 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-04T06:52:12,796 INFO [RS:0;607fd5c6574c:33613 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-04T06:52:12,796 INFO [RS:0;607fd5c6574c:33613 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:12,796 DEBUG [RS:0;607fd5c6574c:33613 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:12,796 DEBUG [RS:0;607fd5c6574c:33613 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:12,796 DEBUG [RS:0;607fd5c6574c:33613 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:12,797 DEBUG [RS:0;607fd5c6574c:33613 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:12,797 DEBUG [RS:0;607fd5c6574c:33613 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:12,797 DEBUG [RS:0;607fd5c6574c:33613 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/607fd5c6574c:0, corePoolSize=2, maxPoolSize=2 2024-12-04T06:52:12,797 DEBUG [RS:0;607fd5c6574c:33613 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:12,797 DEBUG [RS:0;607fd5c6574c:33613 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:12,797 DEBUG [RS:0;607fd5c6574c:33613 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/607fd5c6574c:0, corePoolSize=1, 
maxPoolSize=1 2024-12-04T06:52:12,797 DEBUG [RS:0;607fd5c6574c:33613 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:12,797 DEBUG [RS:0;607fd5c6574c:33613 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:12,797 DEBUG [RS:0;607fd5c6574c:33613 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:12,797 DEBUG [RS:0;607fd5c6574c:33613 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/607fd5c6574c:0, corePoolSize=3, maxPoolSize=3 2024-12-04T06:52:12,798 DEBUG [RS:0;607fd5c6574c:33613 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0, corePoolSize=3, maxPoolSize=3 2024-12-04T06:52:12,801 INFO [RS:0;607fd5c6574c:33613 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:12,801 INFO [RS:0;607fd5c6574c:33613 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:12,801 INFO [RS:0;607fd5c6574c:33613 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:12,801 INFO [RS:0;607fd5c6574c:33613 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:12,801 INFO [RS:0;607fd5c6574c:33613 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:12,801 INFO [RS:0;607fd5c6574c:33613 {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,33613,1733295132530-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T06:52:12,827 INFO [RS:0;607fd5c6574c:33613 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-04T06:52:12,827 INFO [RS:0;607fd5c6574c:33613 {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,33613,1733295132530-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:12,827 INFO [RS:0;607fd5c6574c:33613 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:12,827 INFO [RS:0;607fd5c6574c:33613 {}] regionserver.Replication(171): 607fd5c6574c,33613,1733295132530 started 2024-12-04T06:52:12,849 INFO [RS:0;607fd5c6574c:33613 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-04T06:52:12,849 INFO [RS:0;607fd5c6574c:33613 {}] regionserver.HRegionServer(1482): Serving as 607fd5c6574c,33613,1733295132530, RpcServer on 607fd5c6574c/172.17.0.2:33613, sessionid=0x1017c3eb99a0001 2024-12-04T06:52:12,850 DEBUG [RS:0;607fd5c6574c:33613 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-04T06:52:12,850 DEBUG [RS:0;607fd5c6574c:33613 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 607fd5c6574c,33613,1733295132530 2024-12-04T06:52:12,850 DEBUG [RS:0;607fd5c6574c:33613 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '607fd5c6574c,33613,1733295132530' 2024-12-04T06:52:12,850 DEBUG [RS:0;607fd5c6574c:33613 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-04T06:52:12,850 DEBUG [RS:0;607fd5c6574c:33613 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-04T06:52:12,851 DEBUG [RS:0;607fd5c6574c:33613 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-04T06:52:12,851 DEBUG [RS:0;607fd5c6574c:33613 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-04T06:52:12,851 DEBUG [RS:0;607fd5c6574c:33613 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 607fd5c6574c,33613,1733295132530 2024-12-04T06:52:12,851 DEBUG [RS:0;607fd5c6574c:33613 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '607fd5c6574c,33613,1733295132530' 2024-12-04T06:52:12,851 DEBUG [RS:0;607fd5c6574c:33613 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-04T06:52:12,852 DEBUG [RS:0;607fd5c6574c:33613 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-04T06:52:12,853 DEBUG [RS:0;607fd5c6574c:33613 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-04T06:52:12,853 INFO [RS:0;607fd5c6574c:33613 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-04T06:52:12,853 INFO [RS:0;607fd5c6574c:33613 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-04T06:52:12,854 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:52:12,860 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:52:12,955 INFO [RS:0;607fd5c6574c:33613 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=607fd5c6574c%2C33613%2C1733295132530, suffix=, logDir=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530, archiveDir=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/oldWALs, maxLogs=32 2024-12-04T06:52:12,957 INFO [RS:0;607fd5c6574c:33613 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C33613%2C1733295132530.1733295132956 2024-12-04T06:52:12,968 INFO [RS:0;607fd5c6574c:33613 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295132956 2024-12-04T06:52:12,969 DEBUG [RS:0;607fd5c6574c:33613 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34647:34647),(127.0.0.1/127.0.0.1:38487:38487)] 2024-12-04T06:52:13,167 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:52:13,169 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T06:52:13,170 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T06:52:13,171 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:13,171 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:52:13,171 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-04T06:52:13,173 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, 
major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-04T06:52:13,173 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:13,173 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:52:13,173 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T06:52:13,175 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T06:52:13,175 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:13,175 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:52:13,176 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T06:52:13,177 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 
2024-12-04T06:52:13,177 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:13,178 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:52:13,178 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-04T06:52:13,179 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/hbase/meta/1588230740 2024-12-04T06:52:13,179 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/hbase/meta/1588230740 2024-12-04T06:52:13,181 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-04T06:52:13,181 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-04T06:52:13,181 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-04T06:52:13,183 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-04T06:52:13,185 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T06:52:13,186 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=866445, jitterRate=0.10174182057380676}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T06:52:13,187 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733295133167Initializing all the Stores at 1733295133168 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295133168Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295133168Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295133168Instantiating 
store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295133168Cleaning up temporary data from old regions at 1733295133181 (+13 ms)Region opened successfully at 1733295133187 (+6 ms) 2024-12-04T06:52:13,187 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-04T06:52:13,187 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-04T06:52:13,187 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-04T06:52:13,187 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-04T06:52:13,187 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-04T06:52:13,188 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-04T06:52:13,188 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733295133187Disabling compacts and flushes for region at 1733295133187Disabling writes for close at 1733295133187Writing region close event to WAL at 1733295133188 (+1 ms)Closed at 1733295133188 2024-12-04T06:52:13,190 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T06:52:13,190 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-04T06:52:13,190 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-04T06:52:13,192 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-04T06:52:13,193 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-04T06:52:13,343 DEBUG [607fd5c6574c:45843 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-04T06:52:13,344 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=607fd5c6574c,33613,1733295132530 2024-12-04T06:52:13,346 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 607fd5c6574c,33613,1733295132530, state=OPENING 2024-12-04T06:52:13,349 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-04T06:52:13,352 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45843-0x1017c3eb99a0000, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:13,352 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33613-0x1017c3eb99a0001, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:13,353 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T06:52:13,354 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T06:52:13,355 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-04T06:52:13,355 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=607fd5c6574c,33613,1733295132530}] 2024-12-04T06:52:13,374 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-04T06:52:13,376 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:52:13,408 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:52:13,411 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:52:13,412 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:52:13,510 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-04T06:52:13,512 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58885, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-04T06:52:13,518 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-04T06:52:13,518 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T06:52:13,520 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=607fd5c6574c%2C33613%2C1733295132530.meta, suffix=.meta, logDir=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530, archiveDir=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/oldWALs, maxLogs=32 2024-12-04T06:52:13,523 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta 2024-12-04T06:52:13,534 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta 2024-12-04T06:52:13,538 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38487:38487),(127.0.0.1/127.0.0.1:34647:34647)] 2024-12-04T06:52:13,541 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-04T06:52:13,541 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-04T06:52:13,541 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-04T06:52:13,542 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-04T06:52:13,542 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-04T06:52:13,542 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:52:13,542 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-04T06:52:13,542 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-04T06:52:13,544 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T06:52:13,545 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T06:52:13,545 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:13,546 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:52:13,546 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-04T06:52:13,547 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-04T06:52:13,547 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:13,548 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:52:13,548 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T06:52:13,549 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T06:52:13,549 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:13,550 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:52:13,550 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T06:52:13,551 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-04T06:52:13,551 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:13,552 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-04T06:52:13,552 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-04T06:52:13,553 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/hbase/meta/1588230740 2024-12-04T06:52:13,555 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/hbase/meta/1588230740 2024-12-04T06:52:13,558 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-04T06:52:13,559 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-04T06:52:13,560 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-04T06:52:13,561 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-04T06:52:13,562 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=766501, jitterRate=-0.02534480392932892}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T06:52:13,562 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-04T06:52:13,563 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733295133542Writing region info on filesystem at 1733295133542Initializing all the Stores at 1733295133544 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295133544Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295133544Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295133544Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295133544Cleaning up temporary data from old regions at 1733295133559 (+15 ms)Running coprocessor post-open hooks at 1733295133562 (+3 ms)Region opened successfully at 1733295133563 (+1 ms) 2024-12-04T06:52:13,565 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733295133510 2024-12-04T06:52:13,568 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-04T06:52:13,569 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-04T06:52:13,569 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=607fd5c6574c,33613,1733295132530 2024-12-04T06:52:13,571 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 607fd5c6574c,33613,1733295132530, state=OPEN 2024-12-04T06:52:13,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45843-0x1017c3eb99a0000, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T06:52:13,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33613-0x1017c3eb99a0001, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T06:52:13,576 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T06:52:13,576 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T06:52:13,576 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=607fd5c6574c,33613,1733295132530 2024-12-04T06:52:13,579 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-04T06:52:13,579 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=607fd5c6574c,33613,1733295132530 in 221 msec 2024-12-04T06:52:13,582 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-04T06:52:13,582 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 389 msec 2024-12-04T06:52:13,584 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T06:52:13,584 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-04T06:52:13,586 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-04T06:52:13,586 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=607fd5c6574c,33613,1733295132530, seqNum=-1] 2024-12-04T06:52:13,586 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T06:52:13,588 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53475, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T06:52:13,594 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 852 msec 2024-12-04T06:52:13,594 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733295133594, completionTime=-1 2024-12-04T06:52:13,594 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-04T06:52:13,594 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-04T06:52:13,596 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-04T06:52:13,596 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733295193596 2024-12-04T06:52:13,596 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733295253596 2024-12-04T06:52:13,596 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-04T06:52:13,597 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,45843,1733295132451-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:13,597 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,45843,1733295132451-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:13,597 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,45843,1733295132451-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:13,597 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-607fd5c6574c:45843, period=300000, unit=MILLISECONDS is enabled. 
2024-12-04T06:52:13,597 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:13,597 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:13,599 DEBUG [master/607fd5c6574c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-04T06:52:13,601 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.023sec 2024-12-04T06:52:13,601 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-04T06:52:13,601 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-04T06:52:13,601 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-04T06:52:13,601 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-04T06:52:13,601 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-04T06:52:13,601 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,45843,1733295132451-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T06:52:13,601 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,45843,1733295132451-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-04T06:52:13,604 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-04T06:52:13,604 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-04T06:52:13,604 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,45843,1733295132451-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-04T06:52:13,659 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e8113bb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T06:52:13,659 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 607fd5c6574c,45843,-1 for getting cluster id 2024-12-04T06:52:13,659 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-04T06:52:13,662 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e6fd505c-fa26-47ad-839d-051ea7a94883' 2024-12-04T06:52:13,662 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-04T06:52:13,662 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e6fd505c-fa26-47ad-839d-051ea7a94883" 2024-12-04T06:52:13,662 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44eb01bf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T06:52:13,663 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [607fd5c6574c,45843,-1] 2024-12-04T06:52:13,663 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-04T06:52:13,663 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:52:13,665 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43004, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-04T06:52:13,666 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10c7ae56, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T06:52:13,666 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-04T06:52:13,667 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=607fd5c6574c,33613,1733295132530, seqNum=-1] 2024-12-04T06:52:13,668 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T06:52:13,670 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54024, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T06:52:13,671 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=607fd5c6574c,45843,1733295132451 2024-12-04T06:52:13,672 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:52:13,675 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-04T06:52:13,720 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/607fd5c6574c:0 server-side Connection retries=45 2024-12-04T06:52:13,721 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T06:52:13,721 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T06:52:13,721 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T06:52:13,721 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T06:52:13,721 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T06:52:13,721 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-04T06:52:13,721 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T06:52:13,722 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41491 2024-12-04T06:52:13,724 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41491 connecting to ZooKeeper ensemble=127.0.0.1:54010 2024-12-04T06:52:13,725 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:52:13,726 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:52:13,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:414910x0, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T06:52:13,733 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:414910x0, quorum=127.0.0.1:54010, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-12-04T06:52:13,733 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-12-04T06:52:13,733 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41491-0x1017c3eb99a0002 connected 2024-12-04T06:52:13,734 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-04T06:52:13,736 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-04T06:52:13,737 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(111): regionserver:41491-0x1017c3eb99a0002, quorum=127.0.0.1:54010, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-04T06:52:13,739 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41491-0x1017c3eb99a0002, quorum=127.0.0.1:54010, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T06:52:13,740 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41491 2024-12-04T06:52:13,741 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41491 2024-12-04T06:52:13,741 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41491 2024-12-04T06:52:13,742 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41491 2024-12-04T06:52:13,742 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41491 2024-12-04T06:52:13,743 INFO [RS:1;607fd5c6574c:41491 {}] regionserver.HRegionServer(746): ClusterId : e6fd505c-fa26-47ad-839d-051ea7a94883 2024-12-04T06:52:13,743 DEBUG [RS:1;607fd5c6574c:41491 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-04T06:52:13,745 DEBUG [RS:1;607fd5c6574c:41491 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-04T06:52:13,745 DEBUG [RS:1;607fd5c6574c:41491 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-04T06:52:13,747 DEBUG [RS:1;607fd5c6574c:41491 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-04T06:52:13,748 DEBUG [RS:1;607fd5c6574c:41491 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70e08ad3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=607fd5c6574c/172.17.0.2:0 2024-12-04T06:52:13,761 DEBUG [RS:1;607fd5c6574c:41491 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;607fd5c6574c:41491 2024-12-04T06:52:13,761 INFO [RS:1;607fd5c6574c:41491 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-04T06:52:13,761 INFO [RS:1;607fd5c6574c:41491 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-04T06:52:13,761 DEBUG [RS:1;607fd5c6574c:41491 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-04T06:52:13,762 INFO [RS:1;607fd5c6574c:41491 {}] regionserver.HRegionServer(2659): reportForDuty to master=607fd5c6574c,45843,1733295132451 with port=41491, startcode=1733295133720 2024-12-04T06:52:13,762 DEBUG [RS:1;607fd5c6574c:41491 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-04T06:52:13,764 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55509, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-04T06:52:13,765 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45843 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 607fd5c6574c,41491,1733295133720 2024-12-04T06:52:13,765 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45843 {}] master.ServerManager(517): Registering regionserver=607fd5c6574c,41491,1733295133720 2024-12-04T06:52:13,767 DEBUG [RS:1;607fd5c6574c:41491 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12 2024-12-04T06:52:13,767 DEBUG [RS:1;607fd5c6574c:41491 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41097 2024-12-04T06:52:13,767 DEBUG [RS:1;607fd5c6574c:41491 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-04T06:52:13,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45843-0x1017c3eb99a0000, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T06:52:13,769 DEBUG [RS:1;607fd5c6574c:41491 {}] zookeeper.ZKUtil(111): regionserver:41491-0x1017c3eb99a0002, quorum=127.0.0.1:54010, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/607fd5c6574c,41491,1733295133720 2024-12-04T06:52:13,769 WARN [RS:1;607fd5c6574c:41491 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-04T06:52:13,769 INFO [RS:1;607fd5c6574c:41491 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T06:52:13,769 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [607fd5c6574c,41491,1733295133720] 2024-12-04T06:52:13,770 DEBUG [RS:1;607fd5c6574c:41491 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720 2024-12-04T06:52:13,774 INFO [RS:1;607fd5c6574c:41491 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-04T06:52:13,776 INFO [RS:1;607fd5c6574c:41491 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-04T06:52:13,777 INFO [RS:1;607fd5c6574c:41491 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-04T06:52:13,777 INFO [RS:1;607fd5c6574c:41491 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-04T06:52:13,777 INFO [RS:1;607fd5c6574c:41491 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-04T06:52:13,778 INFO [RS:1;607fd5c6574c:41491 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-04T06:52:13,778 INFO [RS:1;607fd5c6574c:41491 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:13,778 DEBUG [RS:1;607fd5c6574c:41491 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:13,778 DEBUG [RS:1;607fd5c6574c:41491 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:13,778 DEBUG [RS:1;607fd5c6574c:41491 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:13,778 DEBUG [RS:1;607fd5c6574c:41491 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:13,778 DEBUG [RS:1;607fd5c6574c:41491 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:13,778 DEBUG [RS:1;607fd5c6574c:41491 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/607fd5c6574c:0, corePoolSize=2, maxPoolSize=2 2024-12-04T06:52:13,778 DEBUG [RS:1;607fd5c6574c:41491 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:13,778 DEBUG [RS:1;607fd5c6574c:41491 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:13,778 DEBUG [RS:1;607fd5c6574c:41491 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:13,778 DEBUG [RS:1;607fd5c6574c:41491 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:13,778 DEBUG [RS:1;607fd5c6574c:41491 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:13,778 DEBUG [RS:1;607fd5c6574c:41491 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:13,779 DEBUG [RS:1;607fd5c6574c:41491 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/607fd5c6574c:0, corePoolSize=3, maxPoolSize=3 2024-12-04T06:52:13,779 DEBUG [RS:1;607fd5c6574c:41491 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0, corePoolSize=3, maxPoolSize=3 2024-12-04T06:52:13,782 INFO [RS:1;607fd5c6574c:41491 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-04T06:52:13,782 INFO [RS:1;607fd5c6574c:41491 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:13,782 INFO [RS:1;607fd5c6574c:41491 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:13,782 INFO [RS:1;607fd5c6574c:41491 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:13,782 INFO [RS:1;607fd5c6574c:41491 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:13,782 INFO [RS:1;607fd5c6574c:41491 {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,41491,1733295133720-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T06:52:13,804 INFO [RS:1;607fd5c6574c:41491 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-04T06:52:13,804 INFO [RS:1;607fd5c6574c:41491 {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,41491,1733295133720-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:13,804 INFO [RS:1;607fd5c6574c:41491 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:13,805 INFO [RS:1;607fd5c6574c:41491 {}] regionserver.Replication(171): 607fd5c6574c,41491,1733295133720 started 2024-12-04T06:52:13,819 INFO [RS:1;607fd5c6574c:41491 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:13,820 INFO [RS:1;607fd5c6574c:41491 {}] regionserver.HRegionServer(1482): Serving as 607fd5c6574c,41491,1733295133720, RpcServer on 607fd5c6574c/172.17.0.2:41491, sessionid=0x1017c3eb99a0002 2024-12-04T06:52:13,820 DEBUG [RS:1;607fd5c6574c:41491 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-04T06:52:13,820 DEBUG [RS:1;607fd5c6574c:41491 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 607fd5c6574c,41491,1733295133720 2024-12-04T06:52:13,820 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;607fd5c6574c:41491,5,FailOnTimeoutGroup] 2024-12-04T06:52:13,820 DEBUG [RS:1;607fd5c6574c:41491 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '607fd5c6574c,41491,1733295133720' 2024-12-04T06:52:13,820 DEBUG [RS:1;607fd5c6574c:41491 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-04T06:52:13,820 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-12-04T06:52:13,821 DEBUG [RS:1;607fd5c6574c:41491 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-04T06:52:13,821 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-04T06:52:13,821 DEBUG [RS:1;607fd5c6574c:41491 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-04T06:52:13,821 DEBUG [RS:1;607fd5c6574c:41491 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-04T06:52:13,821 DEBUG [RS:1;607fd5c6574c:41491 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
607fd5c6574c,41491,1733295133720 2024-12-04T06:52:13,821 DEBUG [RS:1;607fd5c6574c:41491 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '607fd5c6574c,41491,1733295133720' 2024-12-04T06:52:13,821 DEBUG [RS:1;607fd5c6574c:41491 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-04T06:52:13,822 DEBUG [RS:1;607fd5c6574c:41491 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-04T06:52:13,822 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 607fd5c6574c,45843,1733295132451 2024-12-04T06:52:13,822 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3ee6ce85 2024-12-04T06:52:13,822 DEBUG [RS:1;607fd5c6574c:41491 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-04T06:52:13,822 INFO [RS:1;607fd5c6574c:41491 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-04T06:52:13,822 INFO [RS:1;607fd5c6574c:41491 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-04T06:52:13,822 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-04T06:52:13,825 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43008, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-04T06:52:13,825 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45843 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-04T06:52:13,825 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45843 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-04T06:52:13,826 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45843 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T06:52:13,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45843 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-12-04T06:52:13,830 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-12-04T06:52:13,830 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:13,830 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45843 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-12-04T06:52:13,831 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-04T06:52:13,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45843 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-04T06:52:13,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33043 is added to blk_1073741835_1011 (size=393) 2024-12-04T06:52:13,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45609 is added to blk_1073741835_1011 (size=393) 2024-12-04T06:52:13,846 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 6b5e5651a1c0539271423f1bc707b389, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12 2024-12-04T06:52:13,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33043 is added to blk_1073741836_1012 (size=76) 2024-12-04T06:52:13,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45609 is added to blk_1073741836_1012 (size=76) 2024-12-04T06:52:13,854 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:52:13,854 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 6b5e5651a1c0539271423f1bc707b389, disabling compactions & flushes 2024-12-04T06:52:13,854 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389. 2024-12-04T06:52:13,854 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389. 2024-12-04T06:52:13,854 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389. after waiting 0 ms 2024-12-04T06:52:13,854 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389. 2024-12-04T06:52:13,854 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389. 2024-12-04T06:52:13,854 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 6b5e5651a1c0539271423f1bc707b389: Waiting for close lock at 1733295133854Disabling compacts and flushes for region at 1733295133854Disabling writes for close at 1733295133854Writing region close event to WAL at 1733295133854Closed at 1733295133854 2024-12-04T06:52:13,856 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-12-04T06:52:13,856 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733295133856"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733295133856"}]},"ts":"1733295133856"} 2024-12-04T06:52:13,859 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-04T06:52:13,860 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-04T06:52:13,860 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733295133860"}]},"ts":"1733295133860"} 2024-12-04T06:52:13,863 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-12-04T06:52:13,863 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=6b5e5651a1c0539271423f1bc707b389, ASSIGN}] 2024-12-04T06:52:13,865 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=6b5e5651a1c0539271423f1bc707b389, ASSIGN 2024-12-04T06:52:13,866 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=6b5e5651a1c0539271423f1bc707b389, ASSIGN; state=OFFLINE, location=607fd5c6574c,33613,1733295132530; forceNewPlan=false, retain=false 2024-12-04T06:52:13,925 INFO [RS:1;607fd5c6574c:41491 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=607fd5c6574c%2C41491%2C1733295133720, suffix=, logDir=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720, archiveDir=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/oldWALs, maxLogs=32 2024-12-04T06:52:13,926 INFO [RS:1;607fd5c6574c:41491 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C41491%2C1733295133720.1733295133925 2024-12-04T06:52:13,932 INFO [RS:1;607fd5c6574c:41491 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 2024-12-04T06:52:13,933 DEBUG [RS:1;607fd5c6574c:41491 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34647:34647),(127.0.0.1/127.0.0.1:38487:38487)] 2024-12-04T06:52:14,017 INFO [607fd5c6574c:45843 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-04T06:52:14,017 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=6b5e5651a1c0539271423f1bc707b389, regionState=OPENING, regionLocation=607fd5c6574c,33613,1733295132530 2024-12-04T06:52:14,021 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=6b5e5651a1c0539271423f1bc707b389, ASSIGN because future has completed 2024-12-04T06:52:14,022 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6b5e5651a1c0539271423f1bc707b389, server=607fd5c6574c,33613,1733295132530}] 2024-12-04T06:52:14,181 INFO [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389. 2024-12-04T06:52:14,181 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 6b5e5651a1c0539271423f1bc707b389, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389.', STARTKEY => '', ENDKEY => ''} 2024-12-04T06:52:14,182 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 6b5e5651a1c0539271423f1bc707b389 2024-12-04T06:52:14,182 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:52:14,182 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 6b5e5651a1c0539271423f1bc707b389 2024-12-04T06:52:14,182 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 6b5e5651a1c0539271423f1bc707b389 2024-12-04T06:52:14,184 INFO [StoreOpener-6b5e5651a1c0539271423f1bc707b389-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 6b5e5651a1c0539271423f1bc707b389 2024-12-04T06:52:14,186 INFO [StoreOpener-6b5e5651a1c0539271423f1bc707b389-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6b5e5651a1c0539271423f1bc707b389 columnFamilyName info 2024-12-04T06:52:14,186 DEBUG [StoreOpener-6b5e5651a1c0539271423f1bc707b389-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:14,187 INFO [StoreOpener-6b5e5651a1c0539271423f1bc707b389-1 {}] regionserver.HStore(327): Store=6b5e5651a1c0539271423f1bc707b389/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T06:52:14,187 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 6b5e5651a1c0539271423f1bc707b389 2024-12-04T06:52:14,188 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389 2024-12-04T06:52:14,188 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389 2024-12-04T06:52:14,189 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 6b5e5651a1c0539271423f1bc707b389 2024-12-04T06:52:14,189 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 6b5e5651a1c0539271423f1bc707b389 2024-12-04T06:52:14,191 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 6b5e5651a1c0539271423f1bc707b389 2024-12-04T06:52:14,194 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T06:52:14,195 INFO [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 6b5e5651a1c0539271423f1bc707b389; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=819119, jitterRate=0.04156386852264404}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-04T06:52:14,195 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6b5e5651a1c0539271423f1bc707b389 2024-12-04T06:52:14,195 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 6b5e5651a1c0539271423f1bc707b389: Running coprocessor pre-open hook at 1733295134182Writing region info on filesystem at 1733295134182Initializing all the Stores at 1733295134184 (+2 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295134184Cleaning up temporary data from old regions at 1733295134189 (+5 ms)Running coprocessor post-open hooks at 1733295134195 (+6 ms)Region opened successfully at 1733295134195 2024-12-04T06:52:14,197 INFO [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389., pid=6, masterSystemTime=1733295134176 2024-12-04T06:52:14,200 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389. 2024-12-04T06:52:14,200 INFO [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389. 2024-12-04T06:52:14,201 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=6b5e5651a1c0539271423f1bc707b389, regionState=OPEN, openSeqNum=2, regionLocation=607fd5c6574c,33613,1733295132530 2024-12-04T06:52:14,204 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6b5e5651a1c0539271423f1bc707b389, server=607fd5c6574c,33613,1733295132530 because future has completed 2024-12-04T06:52:14,209 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-04T06:52:14,209 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 6b5e5651a1c0539271423f1bc707b389, server=607fd5c6574c,33613,1733295132530 in 184 msec 2024-12-04T06:52:14,213 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-04T06:52:14,213 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=6b5e5651a1c0539271423f1bc707b389, ASSIGN in 346 msec 2024-12-04T06:52:14,215 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-04T06:52:14,215 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733295134215"}]},"ts":"1733295134215"} 2024-12-04T06:52:14,218 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-12-04T06:52:14,219 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-12-04T06:52:14,222 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 393 msec 2024-12-04T06:52:18,267 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-04T06:52:18,269 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:52:18,284 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:52:18,286 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:52:18,286 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:52:18,792 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-12-04T06:52:22,763 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-04T06:52:22,763 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-04T06:52:22,764 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-04T06:52:22,764 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-12-04T06:52:22,764 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-04T06:52:22,764 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-04T06:52:23,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45843 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-04T06:52:23,899 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-12-04T06:52:23,899 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-12-04T06:52:23,903 DEBUG [Time-limited test {}] 
hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-12-04T06:52:23,903 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389. 2024-12-04T06:52:23,925 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T06:52:23,929 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T06:52:23,930 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T06:52:23,930 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T06:52:23,930 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T06:52:23,931 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d0f4a9a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/hadoop.log.dir/,AVAILABLE} 2024-12-04T06:52:23,931 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ff5703b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T06:52:24,052 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2047cbbb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/java.io.tmpdir/jetty-localhost-42187-hadoop-hdfs-3_4_1-tests_jar-_-any-13981310275091285031/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:52:24,052 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2021586{HTTP/1.1, (http/1.1)}{localhost:42187} 2024-12-04T06:52:24,053 INFO [Time-limited test {}] server.Server(415): Started @116577ms 2024-12-04T06:52:24,054 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-04T06:52:24,105 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T06:52:24,109 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T06:52:24,113 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T06:52:24,113 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T06:52:24,114 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-04T06:52:24,114 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@519de6b2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/hadoop.log.dir/,AVAILABLE} 2024-12-04T06:52:24,115 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@167a7fde{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T06:52:24,251 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5b5be5aa{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/java.io.tmpdir/jetty-localhost-39385-hadoop-hdfs-3_4_1-tests_jar-_-any-11844150301822975420/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:52:24,252 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7f7383da{HTTP/1.1, (http/1.1)}{localhost:39385} 2024-12-04T06:52:24,252 INFO [Time-limited test {}] server.Server(415): Started @116777ms 2024-12-04T06:52:24,254 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-04T06:52:24,306 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T06:52:24,311 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T06:52:24,313 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T06:52:24,313 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T06:52:24,313 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-04T06:52:24,314 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@50ff2063{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/hadoop.log.dir/,AVAILABLE} 2024-12-04T06:52:24,314 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1dc59954{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T06:52:24,397 WARN [Thread-851 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data5/current/BP-1601631713-172.17.0.2-1733295131567/current, will proceed with Du for space computation calculation, 2024-12-04T06:52:24,408 WARN [Thread-852 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data6/current/BP-1601631713-172.17.0.2-1733295131567/current, will proceed with Du for space computation calculation, 2024-12-04T06:52:24,433 WARN [Thread-809 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-04T06:52:24,436 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb78baae5c413fc50 with lease ID 0x3cd9640a93ef8923: Processing first storage report for DS-bc4841fe-8345-4c74-873d-b57a5538e87b from datanode DatanodeRegistration(127.0.0.1:34091, datanodeUuid=99e7a4f4-4680-4b9c-a6d5-cf6b801391c5, infoPort=37195, infoSecurePort=0, ipcPort=37861, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567) 2024-12-04T06:52:24,436 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb78baae5c413fc50 with lease ID 0x3cd9640a93ef8923: from storage DS-bc4841fe-8345-4c74-873d-b57a5538e87b node DatanodeRegistration(127.0.0.1:34091, datanodeUuid=99e7a4f4-4680-4b9c-a6d5-cf6b801391c5, infoPort=37195, infoSecurePort=0, ipcPort=37861, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:52:24,436 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb78baae5c413fc50 with lease ID 0x3cd9640a93ef8923: Processing first storage report for DS-7794d44c-3fad-41fb-96b8-39f9ae38d1e2 from datanode DatanodeRegistration(127.0.0.1:34091, datanodeUuid=99e7a4f4-4680-4b9c-a6d5-cf6b801391c5, infoPort=37195, infoSecurePort=0, ipcPort=37861, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567) 2024-12-04T06:52:24,437 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb78baae5c413fc50 with lease ID 0x3cd9640a93ef8923: from storage DS-7794d44c-3fad-41fb-96b8-39f9ae38d1e2 node DatanodeRegistration(127.0.0.1:34091, datanodeUuid=99e7a4f4-4680-4b9c-a6d5-cf6b801391c5, infoPort=37195, infoSecurePort=0, ipcPort=37861, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:52:24,452 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@30add41a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/java.io.tmpdir/jetty-localhost-39333-hadoop-hdfs-3_4_1-tests_jar-_-any-11880813986180337816/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:52:24,452 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3fd17220{HTTP/1.1, (http/1.1)}{localhost:39333} 2024-12-04T06:52:24,452 INFO [Time-limited test {}] server.Server(415): Started @116977ms 2024-12-04T06:52:24,454 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-04T06:52:24,526 WARN [Thread-877 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data7/current/BP-1601631713-172.17.0.2-1733295131567/current, will proceed with Du for space computation calculation, 2024-12-04T06:52:24,527 WARN [Thread-878 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data8/current/BP-1601631713-172.17.0.2-1733295131567/current, will proceed with Du for space computation calculation, 2024-12-04T06:52:24,546 WARN [Thread-832 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-04T06:52:24,548 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xaea369951773c4f4 with lease ID 0x3cd9640a93ef8924: Processing first storage report for DS-a4c173e6-3e64-45c5-bc38-e4bf87df7a95 from datanode DatanodeRegistration(127.0.0.1:36459, datanodeUuid=17fafbd7-2866-48d1-b436-e07253a019a4, infoPort=42175, infoSecurePort=0, ipcPort=43549, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567) 2024-12-04T06:52:24,549 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaea369951773c4f4 with lease ID 0x3cd9640a93ef8924: from storage DS-a4c173e6-3e64-45c5-bc38-e4bf87df7a95 node DatanodeRegistration(127.0.0.1:36459, datanodeUuid=17fafbd7-2866-48d1-b436-e07253a019a4, infoPort=42175, infoSecurePort=0, ipcPort=43549, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-04T06:52:24,549 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xaea369951773c4f4 with lease ID 0x3cd9640a93ef8924: Processing first storage report for DS-a8bd4afc-ec7d-4f46-b9ce-cbe911e7f5e1 from datanode DatanodeRegistration(127.0.0.1:36459, datanodeUuid=17fafbd7-2866-48d1-b436-e07253a019a4, infoPort=42175, infoSecurePort=0, ipcPort=43549, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567) 2024-12-04T06:52:24,549 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaea369951773c4f4 with lease ID 0x3cd9640a93ef8924: from storage DS-a8bd4afc-ec7d-4f46-b9ce-cbe911e7f5e1 node DatanodeRegistration(127.0.0.1:36459, datanodeUuid=17fafbd7-2866-48d1-b436-e07253a019a4, infoPort=42175, infoSecurePort=0, ipcPort=43549, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:52:24,713 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data9/current/BP-1601631713-172.17.0.2-1733295131567/current, will proceed with Du for space computation calculation, 2024-12-04T06:52:24,713 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data10/current/BP-1601631713-172.17.0.2-1733295131567/current, will proceed with Du for space computation calculation, 2024-12-04T06:52:24,731 WARN [Thread-868 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-04T06:52:24,734 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe7bddc26c15cd710 with lease ID 0x3cd9640a93ef8925: Processing first storage report for DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36 from datanode DatanodeRegistration(127.0.0.1:35439, datanodeUuid=c248eb44-021b-426f-af8d-5e090e4f1042, infoPort=40387, infoSecurePort=0, ipcPort=44471, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567) 2024-12-04T06:52:24,734 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe7bddc26c15cd710 with lease ID 0x3cd9640a93ef8925: from storage DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36 node DatanodeRegistration(127.0.0.1:35439, datanodeUuid=c248eb44-021b-426f-af8d-5e090e4f1042, infoPort=40387, infoSecurePort=0, ipcPort=44471, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:52:24,734 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe7bddc26c15cd710 with lease ID 0x3cd9640a93ef8925: Processing first storage report for DS-07fce8a4-5492-4bc5-a753-f2a5b54cf18e from datanode DatanodeRegistration(127.0.0.1:35439, datanodeUuid=c248eb44-021b-426f-af8d-5e090e4f1042, infoPort=40387, infoSecurePort=0, ipcPort=44471, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567) 2024-12-04T06:52:24,734 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe7bddc26c15cd710 with lease ID 0x3cd9640a93ef8925: from storage DS-07fce8a4-5492-4bc5-a753-f2a5b54cf18e node DatanodeRegistration(127.0.0.1:35439, datanodeUuid=c248eb44-021b-426f-af8d-5e090e4f1042, infoPort=40387, infoSecurePort=0, ipcPort=44471, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:52:24,781 WARN [ResponseProcessor for block BP-1601631713-172.17.0.2-1733295131567:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1601631713-172.17.0.2-1733295131567:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T06:52:24,781 WARN [ResponseProcessor for block BP-1601631713-172.17.0.2-1733295131567:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1601631713-172.17.0.2-1733295131567:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:24,781 WARN [ResponseProcessor for block BP-1601631713-172.17.0.2-1733295131567:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1601631713-172.17.0.2-1733295131567:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1601631713-172.17.0.2-1733295131567:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:24,781 WARN [ResponseProcessor for block BP-1601631713-172.17.0.2-1733295131567:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1601631713-172.17.0.2-1733295131567:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1601631713-172.17.0.2-1733295131567:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:24,781 WARN [DataStreamer for file /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 block BP-1601631713-172.17.0.2-1733295131567:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK], DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK]) is bad. 2024-12-04T06:52:24,781 WARN [DataStreamer for file /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295132956 block BP-1601631713-172.17.0.2-1733295131567:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK], DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK]) is bad. 
2024-12-04T06:52:24,782 WARN [DataStreamer for file /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta block BP-1601631713-172.17.0.2-1733295131567:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK], DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK]) is bad. 2024-12-04T06:52:24,782 WARN [DataStreamer for file /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/WALs/607fd5c6574c,45843,1733295132451/607fd5c6574c%2C45843%2C1733295132451.1733295132659 block BP-1601631713-172.17.0.2-1733295131567:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK], DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK]) is bad. 2024-12-04T06:52:24,782 WARN [PacketResponder: BP-1601631713-172.17.0.2-1733295131567:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33043] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:24,782 WARN [PacketResponder: BP-1601631713-172.17.0.2-1733295131567:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33043] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] 
at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:24,783 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:36624 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:45609:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36624 dst: /127.0.0.1:45609 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:24,783 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_20485322_22 at /127.0.0.1:36580 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:45609:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36580 dst: /127.0.0.1:45609 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:24,783 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_531354279_22 at /127.0.0.1:36646 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:45609:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36646 dst: /127.0.0.1:45609 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:24,782 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:54328 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33043:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54328 dst: /127.0.0.1:33043 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:24,783 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_20485322_22 at /127.0.0.1:54286 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33043:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54286 dst: /127.0.0.1:33043 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:24,783 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_531354279_22 at /127.0.0.1:54362 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:33043:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54362 dst: /127.0.0.1:33043 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T06:52:24,783 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:36608 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:45609:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36608 dst: /127.0.0.1:45609 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:24,784 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:54332 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33043:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54332 dst: /127.0.0.1:33043 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:24,785 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7330fb3f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:52:24,785 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@20aa2ea7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T06:52:24,785 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T06:52:24,786 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ec1a06e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T06:52:24,786 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7eee535{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/hadoop.log.dir/,STOPPED} 2024-12-04T06:52:24,787 WARN [BP-1601631713-172.17.0.2-1733295131567 heartbeating to localhost/127.0.0.1:41097 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T06:52:24,787 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-04T06:52:24,787 WARN [BP-1601631713-172.17.0.2-1733295131567 heartbeating to localhost/127.0.0.1:41097 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1601631713-172.17.0.2-1733295131567 (Datanode Uuid 068c6fc1-5d76-4592-a144-8a94d67736fb) service to localhost/127.0.0.1:41097 2024-12-04T06:52:24,787 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T06:52:24,787 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data3/current/BP-1601631713-172.17.0.2-1733295131567 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:52:24,788 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data4/current/BP-1601631713-172.17.0.2-1733295131567 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:52:24,788 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T06:52:24,788 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@25b5c353 {}] datanode.DataXceiver(331): 127.0.0.1:45609:DataXceiver error processing unknown operation src: /127.0.0.1:57792 dst: /127.0.0.1:45609 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:24,789 WARN [DataStreamer for file /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295132956 block BP-1601631713-172.17.0.2-1733295131567:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T06:52:24,789 WARN [DataStreamer for file /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/WALs/607fd5c6574c,45843,1733295132451/607fd5c6574c%2C45843%2C1733295132451.1733295132659 block BP-1601631713-172.17.0.2-1733295131567:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:24,789 WARN [DataStreamer for file /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta block BP-1601631713-172.17.0.2-1733295131567:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T06:52:24,790 WARN [DataStreamer for file /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 block BP-1601631713-172.17.0.2-1733295131567:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:24,793 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@14d09ab9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:52:24,793 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@ee6b493{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T06:52:24,793 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T06:52:24,794 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@48743db4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T06:52:24,794 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5a107105{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/hadoop.log.dir/,STOPPED} 2024-12-04T06:52:24,795 WARN [BP-1601631713-172.17.0.2-1733295131567 heartbeating to localhost/127.0.0.1:41097 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T06:52:24,795 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-04T06:52:24,795 WARN [BP-1601631713-172.17.0.2-1733295131567 heartbeating to localhost/127.0.0.1:41097 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1601631713-172.17.0.2-1733295131567 (Datanode Uuid 546df34f-c7f7-4370-ba95-552dcf23bcb4) service to localhost/127.0.0.1:41097 2024-12-04T06:52:24,795 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T06:52:24,796 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data1/current/BP-1601631713-172.17.0.2-1733295131567 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:52:24,796 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data2/current/BP-1601631713-172.17.0.2-1733295131567 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:52:24,796 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T06:52:24,800 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389., hostname=607fd5c6574c,33613,1733295132530, seqNum=2] 2024-12-04T06:52:24,801 ERROR [FSHLog-0-hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12-prefix:607fd5c6574c,33613,1733295132530 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:24,801 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:24,801 WARN [FSHLog-0-hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12-prefix:607fd5c6574c,33613,1733295132530 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:24,801 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:24,801 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 607fd5c6574c%2C33613%2C1733295132530:(num 1733295132956) roll requested 2024-12-04T06:52:24,802 INFO [regionserver/607fd5c6574c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C33613%2C1733295132530.1733295144802 2024-12-04T06:52:24,807 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:24,807 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:24,807 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:24,808 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:24,808 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:24,808 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295132956 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295144802 2024-12-04T06:52:24,808 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:24,808 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:24,809 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-04T06:52:24,810 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-04T06:52:24,810 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295132956 2024-12-04T06:52:24,812 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42175:42175),(127.0.0.1/127.0.0.1:40387:40387)] 2024-12-04T06:52:24,812 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295132956 is not closed yet, will try archiving it next time 2024-12-04T06:52:24,813 WARN [IPC Server handler 1 on default port 41097 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295132956 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-12-04T06:52:24,817 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295132956 after 5ms 2024-12-04T06:52:25,780 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:26,633 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:26,812 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:26,813 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295144802 2024-12-04T06:52:26,814 WARN [ResponseProcessor for block BP-1601631713-172.17.0.2-1733295131567:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1601631713-172.17.0.2-1733295131567:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T06:52:26,814 WARN [DataStreamer for file /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295144802 block BP-1601631713-172.17.0.2-1733295131567:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36459,DS-a4c173e6-3e64-45c5-bc38-e4bf87df7a95,DISK], DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36459,DS-a4c173e6-3e64-45c5-bc38-e4bf87df7a95,DISK]) is bad. 2024-12-04T06:52:26,815 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:36438 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:36459:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36438 dst: /127.0.0.1:36459 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
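The entries just above (the ResponseProcessor EOFException, the DataStreamer "Error Recovery ... datanode 0 ... is bad" message, and the DataXceiver ClosedChannelException) come from the HDFS client's write-pipeline recovery: when a datanode in the pipeline stops responding, DataStreamer tries to rebuild the pipeline and, if no healthy replacement can be found, fails the stream with the "All datanodes ... are bad. Aborting..." error seen repeatedly in this log. The following is a minimal sketch of the client-side settings that govern that behavior; the property keys are real HDFS client keys, but the values shown are illustrative assumptions and are not taken from this test run.

import org.apache.hadoop.conf.Configuration;

// Illustrative sketch only: client-side knobs for the pipeline recovery seen in
// the DataStreamer traces above. Values are assumptions for illustration.
public class PipelineRecoveryConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Whether the client may swap in a replacement datanode when one in the
    // write pipeline fails (the DataStreamer.handleBadDatanode path in the traces).
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    // Policy for when a replacement is requested: DEFAULT, ALWAYS or NEVER.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // With best-effort disabled, the write fails once no replacement is available,
    // which is how the "All datanodes ... are bad" abort arises.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", false);
    System.out.println(conf.get("dfs.client.block.write.replace-datanode-on-failure.policy"));
  }
}
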
2024-12-04T06:52:26,815 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:47228 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:35439:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47228 dst: /127.0.0.1:35439 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:26,816 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5b5be5aa{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:52:26,817 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7f7383da{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T06:52:26,817 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T06:52:26,817 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@167a7fde{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T06:52:26,817 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@519de6b2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/hadoop.log.dir/,STOPPED} 2024-12-04T06:52:26,821 WARN [BP-1601631713-172.17.0.2-1733295131567 heartbeating to localhost/127.0.0.1:41097 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T06:52:26,821 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
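The shutdown sequence just above (Jetty contexts stopping, IncrementalBlockReportManager interrupted, the command processor exiting) is a datanode being taken down while writers still hold pipelines through it. The actual TestLogRolling code is not shown in this log; as an assumption for illustration, a common way such a scenario is simulated in Hadoop-based tests is to stop a datanode in a MiniDFSCluster, as sketched below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Illustrative sketch only (an assumption, not the actual test code): stopping a
// datanode so that existing write pipelines through it start failing, producing
// the "is bad" / "Connection refused" errors seen in this log.
public class StopDataNodeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    try {
      cluster.waitActive();
      // Stop the first datanode; clients writing through it must recover their pipelines.
      cluster.stopDataNode(0);
    } finally {
      cluster.shutdown();
    }
  }
}
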
2024-12-04T06:52:26,821 WARN [BP-1601631713-172.17.0.2-1733295131567 heartbeating to localhost/127.0.0.1:41097 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1601631713-172.17.0.2-1733295131567 (Datanode Uuid 17fafbd7-2866-48d1-b436-e07253a019a4) service to localhost/127.0.0.1:41097 2024-12-04T06:52:26,821 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T06:52:26,822 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data7/current/BP-1601631713-172.17.0.2-1733295131567 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:52:26,822 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data8/current/BP-1601631713-172.17.0.2-1733295131567 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:52:26,822 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T06:52:27,780 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:28,633 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:28,812 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:28,813 WARN [regionserver/607fd5c6574c:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK]] 2024-12-04T06:52:28,813 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 607fd5c6574c%2C33613%2C1733295132530:(num 1733295144802) roll requested 2024-12-04T06:52:28,814 INFO [regionserver/607fd5c6574c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C33613%2C1733295132530.1733295148813 2024-12-04T06:52:28,818 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295132956 after 4008ms 2024-12-04T06:52:28,818 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:28,818 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36459,DS-a4c173e6-3e64-45c5-bc38-e4bf87df7a95,DISK], DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36459,DS-a4c173e6-3e64-45c5-bc38-e4bf87df7a95,DISK]) is bad. 2024-12-04T06:52:28,818 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741839_1021 2024-12-04T06:52:28,821 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36459,DS-a4c173e6-3e64-45c5-bc38-e4bf87df7a95,DISK] 2024-12-04T06:52:28,824 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:28,824 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK], DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]) is bad. 2024-12-04T06:52:28,824 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741840_1022 2024-12-04T06:52:28,825 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK] 2024-12-04T06:52:28,826 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:28,826 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK], DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK]) is bad. 
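The RecoverLeaseFSUtils entries earlier in this sequence ("Recover lease on dfs file ...", "Failed to recover lease, attempt=0 ... after 5ms", "attempt=1 ... after 4008ms", with the NameNode reporting "Lease recovery is in progress") show the WAL close path retrying lease recovery on the old WAL file. Below is a minimal sketch of the underlying HDFS call and retry loop such utilities build on, assuming the file system is a DistributedFileSystem; the file path and retry counts are placeholders, not the actual WAL path or the utility's real backoff schedule.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Illustrative sketch: the basic recoverLease/retry pattern. Path and retry
// numbers are assumptions for illustration only.
public class RecoverLeaseSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:41097"); // NameNode address as seen in the log
    FileSystem fs = FileSystem.get(conf);
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    Path oldWal = new Path("/tmp/example-wal"); // placeholder, not the real WAL path
    boolean recovered = false;
    for (int attempt = 0; attempt < 5 && !recovered; attempt++) {
      // recoverLease() returns true once the NameNode has released the lease and
      // closed the file; while recovery is still in progress it returns false.
      recovered = dfs.recoverLease(oldWal);
      if (!recovered) {
        Thread.sleep(4000L); // back off before retrying, roughly like the ~4s gap in the log
      }
    }
    System.out.println("lease recovered: " + recovered);
  }
}
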
2024-12-04T06:52:28,826 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741841_1023 2024-12-04T06:52:28,828 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-04T06:52:28,829 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK] 2024-12-04T06:52:28,838 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:28,838 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:28,838 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:28,839 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:28,839 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:28,839 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295144802 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295148813 2024-12-04T06:52:28,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35439 is added to blk_1073741838_1020 (size=3600) 2024-12-04T06:52:28,852 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37195:37195),(127.0.0.1/127.0.0.1:40387:40387)] 2024-12-04T06:52:28,852 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295132956 is not closed yet, will try archiving it next time 2024-12-04T06:52:28,853 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295144802 is not closed yet, will try archiving it next time 2024-12-04T06:52:29,247 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295132956 is not closed yet, will try archiving it next time 2024-12-04T06:52:29,780 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
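The AbstractWALRoller/AbstractFSWAL entries above ("roll requested", then "Rolled WAL ... with entries=3, filesize=3.51 KB; new WAL ...") show the region server rolling its WAL after the low-replication pipeline errors. In this log the rolls are triggered internally by the log roller, but the same operation can be requested through the public client API; the sketch below is an illustration of that, with the connection setup assumed and the ServerName built from values that appear in the WAL path in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Illustrative sketch: asking a specific region server to roll its WAL writer,
// the same close-old/open-new step the log roller performs above.
public class RollWalSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Host, port and start code as they appear in the WAL directory name in the log.
      ServerName serverName = ServerName.valueOf("607fd5c6574c", 33613, 1733295132530L);
      admin.rollWALWriter(serverName); // closes the current WAL file and opens a new one
    }
  }
}
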
2024-12-04T06:52:30,634 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:30,749 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@29656931[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35439, datanodeUuid=c248eb44-021b-426f-af8d-5e090e4f1042, infoPort=40387, infoSecurePort=0, ipcPort=44471, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567):Failed to transfer BP-1601631713-172.17.0.2-1733295131567:blk_1073741838_1020 to 127.0.0.1:36459 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:30,832 WARN [ResponseProcessor for block BP-1601631713-172.17.0.2-1733295131567:blk_1073741842_1024 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1601631713-172.17.0.2-1733295131567:blk_1073741842_1024 java.io.IOException: Bad response ERROR for BP-1601631713-172.17.0.2-1733295131567:blk_1073741842_1024 from datanode DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:30,833 WARN [DataStreamer for file /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295148813 block BP-1601631713-172.17.0.2-1733295131567:blk_1073741842_1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34091,DS-bc4841fe-8345-4c74-873d-b57a5538e87b,DISK], DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK]) is bad. 
2024-12-04T06:52:30,833 WARN [PacketResponder: BP-1601631713-172.17.0.2-1733295131567:blk_1073741842_1024, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:35439] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:30,833 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:34970 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:34091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34970 dst: /127.0.0.1:34091 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T06:52:30,833 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:47256 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:35439:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47256 dst: /127.0.0.1:35439 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T06:52:30,834 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@30add41a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:52:30,835 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3fd17220{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T06:52:30,835 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T06:52:30,835 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1dc59954{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T06:52:30,835 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@50ff2063{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/hadoop.log.dir/,STOPPED} 2024-12-04T06:52:30,837 WARN [BP-1601631713-172.17.0.2-1733295131567 heartbeating to localhost/127.0.0.1:41097 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T06:52:30,837 WARN [BP-1601631713-172.17.0.2-1733295131567 heartbeating to localhost/127.0.0.1:41097 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1601631713-172.17.0.2-1733295131567 (Datanode Uuid c248eb44-021b-426f-af8d-5e090e4f1042) service to localhost/127.0.0.1:41097 2024-12-04T06:52:30,837 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-04T06:52:30,837 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T06:52:30,837 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data9/current/BP-1601631713-172.17.0.2-1733295131567 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:52:30,837 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data10/current/BP-1601631713-172.17.0.2-1733295131567 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:52:30,838 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T06:52:30,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33613 {}] regionserver.HRegion(8855): Flush requested on 6b5e5651a1c0539271423f1bc707b389 2024-12-04T06:52:30,847 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6b5e5651a1c0539271423f1bc707b389 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-04T06:52:30,853 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:30,853 WARN [regionserver/607fd5c6574c:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34091,DS-bc4841fe-8345-4c74-873d-b57a5538e87b,DISK]] 2024-12-04T06:52:30,853 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 607fd5c6574c%2C33613%2C1733295132530:(num 1733295148813) roll requested 2024-12-04T06:52:30,854 INFO [regionserver/607fd5c6574c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C33613%2C1733295132530.1733295150853 2024-12-04T06:52:30,856 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:30,857 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK], DatanodeInfoWithStorage[127.0.0.1:34091,DS-bc4841fe-8345-4c74-873d-b57a5538e87b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]) is bad. 2024-12-04T06:52:30,857 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741843_1026 2024-12-04T06:52:30,857 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK] 2024-12-04T06:52:30,859 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:30,859 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK], DatanodeInfoWithStorage[127.0.0.1:36459,DS-a4c173e6-3e64-45c5-bc38-e4bf87df7a95,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK]) is bad. 2024-12-04T06:52:30,859 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741844_1027 2024-12-04T06:52:30,860 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK] 2024-12-04T06:52:30,862 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:30,862 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36459,DS-a4c173e6-3e64-45c5-bc38-e4bf87df7a95,DISK], DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36459,DS-a4c173e6-3e64-45c5-bc38-e4bf87df7a95,DISK]) is bad. 2024-12-04T06:52:30,862 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741845_1028 2024-12-04T06:52:30,863 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36459,DS-a4c173e6-3e64-45c5-bc38-e4bf87df7a95,DISK] 2024-12-04T06:52:30,866 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35439 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:30,866 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34091,DS-bc4841fe-8345-4c74-873d-b57a5538e87b,DISK], DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK]) is bad. 2024-12-04T06:52:30,866 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:44576 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data6]'}, localName='127.0.0.1:34091', datanodeUuid='99e7a4f4-4680-4b9c-a6d5-cf6b801391c5', xmitsInProgress=0}:Exception transferring block BP-1601631713-172.17.0.2-1733295131567:blk_1073741846_1029 to mirror 127.0.0.1:35439 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:30,866 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741846_1029 2024-12-04T06:52:30,866 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:44576 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-12-04T06:52:30,866 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:44576 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:34091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44576 dst: /127.0.0.1:34091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:30,867 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK] 2024-12-04T06:52:30,868 WARN [IPC Server handler 4 on default port 41097 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-04T06:52:30,868 WARN [IPC Server handler 4 on default port 41097 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-04T06:52:30,868 WARN [IPC Server handler 4 on default port 41097 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-04T06:52:30,869 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/.tmp/info/7b51ca98eee940d987c39d84eda40948 is 1080, key is row0002/info:/1733295146824/Put/seqid=0 2024-12-04T06:52:30,871 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) 
~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:30,871 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36459,DS-a4c173e6-3e64-45c5-bc38-e4bf87df7a95,DISK], DatanodeInfoWithStorage[127.0.0.1:34091,DS-bc4841fe-8345-4c74-873d-b57a5538e87b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36459,DS-a4c173e6-3e64-45c5-bc38-e4bf87df7a95,DISK]) is bad. 2024-12-04T06:52:30,871 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741848_1031 2024-12-04T06:52:30,872 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36459,DS-a4c173e6-3e64-45c5-bc38-e4bf87df7a95,DISK] 2024-12-04T06:52:30,872 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:30,872 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:30,872 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:30,873 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:30,873 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:30,873 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:30,873 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK], DatanodeInfoWithStorage[127.0.0.1:34091,DS-bc4841fe-8345-4c74-873d-b57a5538e87b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]) is bad. 
2024-12-04T06:52:30,873 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741849_1032 2024-12-04T06:52:30,873 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295148813 with entries=12, filesize=12.96 KB; new WAL /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295150853 2024-12-04T06:52:30,874 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK] 2024-12-04T06:52:30,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741842_1025 (size=13274) 2024-12-04T06:52:30,877 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35439 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:30,877 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:44596 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741850_1033] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data6]'}, localName='127.0.0.1:34091', datanodeUuid='99e7a4f4-4680-4b9c-a6d5-cf6b801391c5', xmitsInProgress=0}:Exception transferring block BP-1601631713-172.17.0.2-1733295131567:blk_1073741850_1033 to mirror 127.0.0.1:35439 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:30,878 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34091,DS-bc4841fe-8345-4c74-873d-b57a5538e87b,DISK], DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK]) is bad. 2024-12-04T06:52:30,878 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741850_1033 2024-12-04T06:52:30,878 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:44596 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741850_1033] {}] datanode.BlockReceiver(316): Block 1073741850 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-04T06:52:30,878 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:44596 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741850_1033] {}] datanode.DataXceiver(331): 127.0.0.1:34091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44596 dst: /127.0.0.1:34091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:30,878 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK] 2024-12-04T06:52:30,879 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:30,879 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK], DatanodeInfoWithStorage[127.0.0.1:34091,DS-bc4841fe-8345-4c74-873d-b57a5538e87b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK]) is bad. 2024-12-04T06:52:30,879 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741851_1034 2024-12-04T06:52:30,880 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK] 2024-12-04T06:52:30,880 WARN [IPC Server handler 2 on default port 41097 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-04T06:52:30,880 WARN [IPC Server handler 2 on default port 41097 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-04T06:52:30,881 WARN [IPC Server handler 2 on default port 41097 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-04T06:52:30,881 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37195:37195)] 2024-12-04T06:52:30,881 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295132956 is not closed yet, will try archiving it next time 2024-12-04T06:52:30,881 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295148813 is not closed yet, will try archiving it next time 2024-12-04T06:52:30,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741852_1035 (size=10347) 2024-12-04T06:52:31,275 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295132956 is not closed yet, will try 
archiving it next time 2024-12-04T06:52:31,286 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/.tmp/info/7b51ca98eee940d987c39d84eda40948 2024-12-04T06:52:31,294 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/.tmp/info/7b51ca98eee940d987c39d84eda40948 as hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/info/7b51ca98eee940d987c39d84eda40948 2024-12-04T06:52:31,299 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/info/7b51ca98eee940d987c39d84eda40948, entries=5, sequenceid=11, filesize=10.1 K 2024-12-04T06:52:31,300 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 6b5e5651a1c0539271423f1bc707b389 in 453ms, sequenceid=11, compaction requested=false 2024-12-04T06:52:31,300 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6b5e5651a1c0539271423f1bc707b389: 2024-12-04T06:52:31,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33613 {}] regionserver.HRegion(8855): Flush requested on 6b5e5651a1c0539271423f1bc707b389 2024-12-04T06:52:31,470 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6b5e5651a1c0539271423f1bc707b389 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-12-04T06:52:31,475 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/.tmp/info/f19a864c9c644dcba50f6d61e2fc3cbf is 1080, key is row0007/info:/1733295150848/Put/seqid=0 2024-12-04T06:52:31,476 WARN [Thread-932 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T06:52:31,476 WARN [Thread-932 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK], DatanodeInfoWithStorage[127.0.0.1:36459,DS-a4c173e6-3e64-45c5-bc38-e4bf87df7a95,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK]) is bad. 2024-12-04T06:52:31,476 WARN [Thread-932 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741853_1036 2024-12-04T06:52:31,477 WARN [Thread-932 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK] 2024-12-04T06:52:31,478 WARN [Thread-932 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:31,478 WARN [Thread-932 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK], DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK]) is bad. 2024-12-04T06:52:31,478 WARN [Thread-932 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741854_1037 2024-12-04T06:52:31,479 WARN [Thread-932 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK] 2024-12-04T06:52:31,481 WARN [Thread-932 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36459 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T06:52:31,481 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:44626 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741855_1038] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data6]'}, localName='127.0.0.1:34091', datanodeUuid='99e7a4f4-4680-4b9c-a6d5-cf6b801391c5', xmitsInProgress=0}:Exception transferring block BP-1601631713-172.17.0.2-1733295131567:blk_1073741855_1038 to mirror 127.0.0.1:36459 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:31,481 WARN [Thread-932 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34091,DS-bc4841fe-8345-4c74-873d-b57a5538e87b,DISK], DatanodeInfoWithStorage[127.0.0.1:36459,DS-a4c173e6-3e64-45c5-bc38-e4bf87df7a95,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36459,DS-a4c173e6-3e64-45c5-bc38-e4bf87df7a95,DISK]) is bad. 2024-12-04T06:52:31,481 WARN [Thread-932 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741855_1038 2024-12-04T06:52:31,481 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:44626 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741855_1038] {}] datanode.BlockReceiver(316): Block 1073741855 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-04T06:52:31,481 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:44626 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741855_1038] {}] datanode.DataXceiver(331): 127.0.0.1:34091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44626 dst: /127.0.0.1:34091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:31,481 WARN [Thread-932 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36459,DS-a4c173e6-3e64-45c5-bc38-e4bf87df7a95,DISK] 2024-12-04T06:52:31,483 WARN [Thread-932 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:31,483 WARN [Thread-932 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK], DatanodeInfoWithStorage[127.0.0.1:34091,DS-bc4841fe-8345-4c74-873d-b57a5538e87b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]) is bad. 
2024-12-04T06:52:31,483 WARN [Thread-932 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741856_1039 2024-12-04T06:52:31,483 WARN [Thread-932 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK] 2024-12-04T06:52:31,484 WARN [IPC Server handler 1 on default port 41097 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-04T06:52:31,484 WARN [IPC Server handler 1 on default port 41097 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-04T06:52:31,484 WARN [IPC Server handler 1 on default port 41097 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-04T06:52:31,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741857_1040 (size=12506) 2024-12-04T06:52:31,781 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T06:52:31,887 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/.tmp/info/f19a864c9c644dcba50f6d61e2fc3cbf 2024-12-04T06:52:31,899 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/.tmp/info/f19a864c9c644dcba50f6d61e2fc3cbf as hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/info/f19a864c9c644dcba50f6d61e2fc3cbf 2024-12-04T06:52:31,904 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/info/f19a864c9c644dcba50f6d61e2fc3cbf, entries=7, sequenceid=24, filesize=12.2 K 2024-12-04T06:52:31,906 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 6b5e5651a1c0539271423f1bc707b389 in 435ms, sequenceid=24, compaction requested=false 2024-12-04T06:52:31,906 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6b5e5651a1c0539271423f1bc707b389: 2024-12-04T06:52:31,906 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-12-04T06:52:31,906 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T06:52:31,906 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/info/f19a864c9c644dcba50f6d61e2fc3cbf because midkey is the same as first or last row 2024-12-04T06:52:32,634 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:32,881 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:32,881 WARN [regionserver/607fd5c6574c:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34091,DS-bc4841fe-8345-4c74-873d-b57a5538e87b,DISK]] 2024-12-04T06:52:32,882 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 607fd5c6574c%2C33613%2C1733295132530:(num 1733295150853) roll requested 2024-12-04T06:52:32,882 INFO [regionserver/607fd5c6574c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C33613%2C1733295132530.1733295152882 2024-12-04T06:52:32,885 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:32,885 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK], DatanodeInfoWithStorage[127.0.0.1:36459,DS-a4c173e6-3e64-45c5-bc38-e4bf87df7a95,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK]) is bad. 2024-12-04T06:52:32,885 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741858_1041 2024-12-04T06:52:32,886 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK] 2024-12-04T06:52:32,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33613 {}] regionserver.HRegion(8855): Flush requested on 6b5e5651a1c0539271423f1bc707b389 2024-12-04T06:52:32,887 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:32,887 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36459,DS-a4c173e6-3e64-45c5-bc38-e4bf87df7a95,DISK], DatanodeInfoWithStorage[127.0.0.1:34091,DS-bc4841fe-8345-4c74-873d-b57a5538e87b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36459,DS-a4c173e6-3e64-45c5-bc38-e4bf87df7a95,DISK]) is bad. 2024-12-04T06:52:32,887 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741859_1042 2024-12-04T06:52:32,887 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6b5e5651a1c0539271423f1bc707b389 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-04T06:52:32,887 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36459,DS-a4c173e6-3e64-45c5-bc38-e4bf87df7a95,DISK] 2024-12-04T06:52:32,890 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:44640 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741860_1043] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data6]'}, localName='127.0.0.1:34091', datanodeUuid='99e7a4f4-4680-4b9c-a6d5-cf6b801391c5', xmitsInProgress=0}:Exception transferring block BP-1601631713-172.17.0.2-1733295131567:blk_1073741860_1043 to mirror 127.0.0.1:45609 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T06:52:32,890 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:44640 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741860_1043] {}] datanode.BlockReceiver(316): Block 1073741860 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-04T06:52:32,890 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:44640 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741860_1043] {}] datanode.DataXceiver(331): 127.0.0.1:34091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44640 dst: /127.0.0.1:34091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:32,890 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45609 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:32,890 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34091,DS-bc4841fe-8345-4c74-873d-b57a5538e87b,DISK], DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]) is bad. 
2024-12-04T06:52:32,890 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741860_1043 2024-12-04T06:52:32,891 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK] 2024-12-04T06:52:32,893 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33043 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:32,893 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:44654 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741861_1044] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data6]'}, localName='127.0.0.1:34091', datanodeUuid='99e7a4f4-4680-4b9c-a6d5-cf6b801391c5', xmitsInProgress=0}:Exception transferring block BP-1601631713-172.17.0.2-1733295131567:blk_1073741861_1044 to mirror 127.0.0.1:33043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:32,893 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34091,DS-bc4841fe-8345-4c74-873d-b57a5538e87b,DISK], DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK]) is bad. 
2024-12-04T06:52:32,893 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741861_1044 2024-12-04T06:52:32,893 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:44654 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741861_1044] {}] datanode.BlockReceiver(316): Block 1073741861 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-04T06:52:32,893 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:44654 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741861_1044] {}] datanode.DataXceiver(331): 127.0.0.1:34091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44654 dst: /127.0.0.1:34091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T06:52:32,893 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/.tmp/info/1b2dd89566c04f9d8f935cb7ecbeac10 is 1079, key is tmprow/info:/1733295152886/Put/seqid=0 2024-12-04T06:52:32,894 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK] 2024-12-04T06:52:32,894 WARN [IPC Server handler 0 on default port 41097 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-04T06:52:32,894 WARN [IPC Server handler 0 on default port 41097 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-04T06:52:32,894 WARN [IPC Server handler 0 on default port 41097 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-04T06:52:32,895 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:32,895 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK], DatanodeInfoWithStorage[127.0.0.1:34091,DS-bc4841fe-8345-4c74-873d-b57a5538e87b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK]) is bad. 
2024-12-04T06:52:32,895 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741863_1046 2024-12-04T06:52:32,896 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK] 2024-12-04T06:52:32,896 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:32,896 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:32,897 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:32,897 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:32,897 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:32,897 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:32,897 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK], DatanodeInfoWithStorage[127.0.0.1:36459,DS-a4c173e6-3e64-45c5-bc38-e4bf87df7a95,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK]) is bad. 
2024-12-04T06:52:32,897 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741864_1047 2024-12-04T06:52:32,897 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295150853 with entries=14, filesize=12.82 KB; new WAL /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295152882 2024-12-04T06:52:32,897 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK] 2024-12-04T06:52:32,898 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37195:37195)] 2024-12-04T06:52:32,898 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295132956 is not closed yet, will try archiving it next time 2024-12-04T06:52:32,898 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295150853 is not closed yet, will try archiving it next time 2024-12-04T06:52:32,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741847_1030 (size=13133) 2024-12-04T06:52:32,899 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T06:52:32,899 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295144802 to hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/oldWALs/607fd5c6574c%2C33613%2C1733295132530.1733295144802 2024-12-04T06:52:32,899 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36459,DS-a4c173e6-3e64-45c5-bc38-e4bf87df7a95,DISK], DatanodeInfoWithStorage[127.0.0.1:34091,DS-bc4841fe-8345-4c74-873d-b57a5538e87b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36459,DS-a4c173e6-3e64-45c5-bc38-e4bf87df7a95,DISK]) is bad. 2024-12-04T06:52:32,899 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741865_1048 2024-12-04T06:52:32,900 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36459,DS-a4c173e6-3e64-45c5-bc38-e4bf87df7a95,DISK] 2024-12-04T06:52:32,900 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295148813 to hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/oldWALs/607fd5c6574c%2C33613%2C1733295132530.1733295148813 2024-12-04T06:52:32,901 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:32,901 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK], DatanodeInfoWithStorage[127.0.0.1:34091,DS-bc4841fe-8345-4c74-873d-b57a5538e87b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]) is bad. 
2024-12-04T06:52:32,901 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741866_1049 2024-12-04T06:52:32,901 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK] 2024-12-04T06:52:32,902 WARN [IPC Server handler 1 on default port 41097 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-04T06:52:32,902 WARN [IPC Server handler 1 on default port 41097 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-04T06:52:32,902 WARN [IPC Server handler 1 on default port 41097 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-04T06:52:32,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741867_1050 (size=6027) 2024-12-04T06:52:33,299 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295132956 is not closed yet, will try archiving it next time 2024-12-04T06:52:33,305 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/.tmp/info/1b2dd89566c04f9d8f935cb7ecbeac10 2024-12-04T06:52:33,312 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/.tmp/info/1b2dd89566c04f9d8f935cb7ecbeac10 as hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/info/1b2dd89566c04f9d8f935cb7ecbeac10 2024-12-04T06:52:33,318 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/info/1b2dd89566c04f9d8f935cb7ecbeac10, entries=1, sequenceid=34, filesize=5.9 K 2024-12-04T06:52:33,320 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 6b5e5651a1c0539271423f1bc707b389 in 432ms, sequenceid=34, compaction requested=true 2024-12-04T06:52:33,320 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6b5e5651a1c0539271423f1bc707b389: 2024-12-04T06:52:33,320 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-12-04T06:52:33,320 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T06:52:33,320 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/info/f19a864c9c644dcba50f6d61e2fc3cbf because midkey is the same as first or last row 2024-12-04T06:52:33,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6b5e5651a1c0539271423f1bc707b389:info, priority=-2147483648, current under compaction store size is 1 2024-12-04T06:52:33,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T06:52:33,325 DEBUG [RS:0;607fd5c6574c:33613-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T06:52:33,327 DEBUG [RS:0;607fd5c6574c:33613-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T06:52:33,327 DEBUG [RS:0;607fd5c6574c:33613-shortCompactions-0 {}] regionserver.HStore(1541): 6b5e5651a1c0539271423f1bc707b389/info is initiating minor compaction (all files) 2024-12-04T06:52:33,328 INFO [RS:0;607fd5c6574c:33613-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 6b5e5651a1c0539271423f1bc707b389/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389. 
2024-12-04T06:52:33,328 INFO [RS:0;607fd5c6574c:33613-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/info/7b51ca98eee940d987c39d84eda40948, hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/info/f19a864c9c644dcba50f6d61e2fc3cbf, hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/info/1b2dd89566c04f9d8f935cb7ecbeac10] into tmpdir=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/.tmp, totalSize=28.2 K 2024-12-04T06:52:33,328 DEBUG [RS:0;607fd5c6574c:33613-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7b51ca98eee940d987c39d84eda40948, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733295146824 2024-12-04T06:52:33,329 DEBUG [RS:0;607fd5c6574c:33613-shortCompactions-0 {}] compactions.Compactor(225): Compacting f19a864c9c644dcba50f6d61e2fc3cbf, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1733295150848 2024-12-04T06:52:33,330 DEBUG [RS:0;607fd5c6574c:33613-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1b2dd89566c04f9d8f935cb7ecbeac10, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733295152886 2024-12-04T06:52:33,345 INFO [RS:0;607fd5c6574c:33613-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6b5e5651a1c0539271423f1bc707b389#info#compaction#21 average throughput is 4.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T06:52:33,345 DEBUG [RS:0;607fd5c6574c:33613-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/.tmp/info/33d97ff7cd70430c8ffb618a6cedf46f is 1080, key is row0002/info:/1733295146824/Put/seqid=0 2024-12-04T06:52:33,347 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T06:52:33,347 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36459,DS-a4c173e6-3e64-45c5-bc38-e4bf87df7a95,DISK], DatanodeInfoWithStorage[127.0.0.1:34091,DS-bc4841fe-8345-4c74-873d-b57a5538e87b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36459,DS-a4c173e6-3e64-45c5-bc38-e4bf87df7a95,DISK]) is bad. 2024-12-04T06:52:33,348 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741868_1051 2024-12-04T06:52:33,348 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36459,DS-a4c173e6-3e64-45c5-bc38-e4bf87df7a95,DISK] 2024-12-04T06:52:33,350 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:33,350 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK], DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]) is bad. 2024-12-04T06:52:33,350 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741869_1052 2024-12-04T06:52:33,351 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK] 2024-12-04T06:52:33,353 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33043 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T06:52:33,353 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:44696 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741870_1053] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data6]'}, localName='127.0.0.1:34091', datanodeUuid='99e7a4f4-4680-4b9c-a6d5-cf6b801391c5', xmitsInProgress=0}:Exception transferring block BP-1601631713-172.17.0.2-1733295131567:blk_1073741870_1053 to mirror 127.0.0.1:33043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:33,354 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34091,DS-bc4841fe-8345-4c74-873d-b57a5538e87b,DISK], DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK]) is bad. 2024-12-04T06:52:33,354 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:44696 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741870_1053] {}] datanode.BlockReceiver(316): Block 1073741870 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-04T06:52:33,354 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741870_1053 2024-12-04T06:52:33,354 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:44696 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741870_1053] {}] datanode.DataXceiver(331): 127.0.0.1:34091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44696 dst: /127.0.0.1:34091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:33,354 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK] 2024-12-04T06:52:33,357 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35439 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:33,357 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:44712 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741871_1054] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data6]'}, localName='127.0.0.1:34091', datanodeUuid='99e7a4f4-4680-4b9c-a6d5-cf6b801391c5', xmitsInProgress=0}:Exception transferring block BP-1601631713-172.17.0.2-1733295131567:blk_1073741871_1054 to mirror 127.0.0.1:35439 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T06:52:33,357 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34091,DS-bc4841fe-8345-4c74-873d-b57a5538e87b,DISK], DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK]) is bad. 2024-12-04T06:52:33,357 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741871_1054 2024-12-04T06:52:33,357 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:44712 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741871_1054] {}] datanode.BlockReceiver(316): Block 1073741871 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-04T06:52:33,357 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:44712 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741871_1054] {}] datanode.DataXceiver(331): 127.0.0.1:34091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44712 dst: /127.0.0.1:34091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T06:52:33,358 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK] 2024-12-04T06:52:33,358 WARN [IPC Server handler 2 on default port 41097 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-04T06:52:33,358 WARN [IPC Server handler 2 on default port 41097 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-04T06:52:33,358 WARN [IPC Server handler 2 on default port 41097 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-04T06:52:33,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741872_1055 (size=17994) 2024-12-04T06:52:33,437 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@407b0d80[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34091, datanodeUuid=99e7a4f4-4680-4b9c-a6d5-cf6b801391c5, infoPort=37195, infoSecurePort=0, ipcPort=37861, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567):Failed to transfer BP-1601631713-172.17.0.2-1733295131567:blk_1073741842_1025 to 127.0.0.1:36459 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
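The BlockPlacementPolicyDefault warnings above name two loggers to raise to DEBUG for more detail. As a minimal, hypothetical sketch (not part of this test run; the class name EnableBlockPlacementDebug is invented for illustration), one way to do that is through the Log4j2 Configurator API that this harness already loads:

// Hypothetical helper: enables the DEBUG output the warning above asks for.
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

public final class EnableBlockPlacementDebug {
  public static void main(String[] args) {
    // Logger names are copied verbatim from the warning message.
    Configurator.setLevel(
        "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy", Level.DEBUG);
    Configurator.setLevel("org.apache.hadoop.net.NetworkTopology", Level.DEBUG);
  }
}

The same effect could presumably be had by adding logger entries for those two names at level DEBUG to the Log4j2 properties configuration the test loads at startup.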
2024-12-04T06:52:33,437 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4ebcebff[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34091, datanodeUuid=99e7a4f4-4680-4b9c-a6d5-cf6b801391c5, infoPort=37195, infoSecurePort=0, ipcPort=37861, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567):Failed to transfer BP-1601631713-172.17.0.2-1733295131567:blk_1073741852_1035 to 127.0.0.1:33043 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:33,770 DEBUG [RS:0;607fd5c6574c:33613-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/.tmp/info/33d97ff7cd70430c8ffb618a6cedf46f as hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/info/33d97ff7cd70430c8ffb618a6cedf46f 2024-12-04T06:52:33,779 INFO [RS:0;607fd5c6574c:33613-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 6b5e5651a1c0539271423f1bc707b389/info of 6b5e5651a1c0539271423f1bc707b389 into 33d97ff7cd70430c8ffb618a6cedf46f(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T06:52:33,779 DEBUG [RS:0;607fd5c6574c:33613-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 6b5e5651a1c0539271423f1bc707b389: 2024-12-04T06:52:33,779 INFO [RS:0;607fd5c6574c:33613-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389., storeName=6b5e5651a1c0539271423f1bc707b389/info, priority=13, startTime=1733295153320; duration=0sec 2024-12-04T06:52:33,779 DEBUG [RS:0;607fd5c6574c:33613-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-04T06:52:33,779 DEBUG [RS:0;607fd5c6574c:33613-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T06:52:33,780 DEBUG [RS:0;607fd5c6574c:33613-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/info/33d97ff7cd70430c8ffb618a6cedf46f because midkey is the same as first or last row 2024-12-04T06:52:33,780 DEBUG [RS:0;607fd5c6574c:33613-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-04T06:52:33,780 DEBUG [RS:0;607fd5c6574c:33613-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T06:52:33,780 DEBUG [RS:0;607fd5c6574c:33613-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/info/33d97ff7cd70430c8ffb618a6cedf46f because midkey is the same as first or last row 2024-12-04T06:52:33,780 DEBUG [RS:0;607fd5c6574c:33613-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-04T06:52:33,780 DEBUG [RS:0;607fd5c6574c:33613-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T06:52:33,780 DEBUG [RS:0;607fd5c6574c:33613-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/info/33d97ff7cd70430c8ffb618a6cedf46f because midkey is the same as first or last row 2024-12-04T06:52:33,780 DEBUG [RS:0;607fd5c6574c:33613-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T06:52:33,780 DEBUG [RS:0;607fd5c6574c:33613-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6b5e5651a1c0539271423f1bc707b389:info 2024-12-04T06:52:33,781 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:34,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33613 {}] regionserver.HRegion(8855): Flush requested on 6b5e5651a1c0539271423f1bc707b389 2024-12-04T06:52:34,306 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6b5e5651a1c0539271423f1bc707b389 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-04T06:52:34,310 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/.tmp/info/9ef3086d3e1f40ee9e66f09332cb9b8e is 1079, key is tmprow/info:/1733295154305/Put/seqid=0 2024-12-04T06:52:34,312 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:34,312 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK], DatanodeInfoWithStorage[127.0.0.1:34091,DS-bc4841fe-8345-4c74-873d-b57a5538e87b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK]) is bad. 2024-12-04T06:52:34,312 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741873_1056 2024-12-04T06:52:34,313 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK] 2024-12-04T06:52:34,315 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33043 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:34,315 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:44738 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741874_1057] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data6]'}, localName='127.0.0.1:34091', datanodeUuid='99e7a4f4-4680-4b9c-a6d5-cf6b801391c5', xmitsInProgress=0}:Exception transferring block BP-1601631713-172.17.0.2-1733295131567:blk_1073741874_1057 to mirror 127.0.0.1:33043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:34,315 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34091,DS-bc4841fe-8345-4c74-873d-b57a5538e87b,DISK], DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK]) is bad. 2024-12-04T06:52:34,315 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:44738 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741874_1057] {}] datanode.BlockReceiver(316): Block 1073741874 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-04T06:52:34,315 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741874_1057 2024-12-04T06:52:34,315 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:44738 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741874_1057] {}] datanode.DataXceiver(331): 127.0.0.1:34091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44738 dst: /127.0.0.1:34091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:34,316 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33043,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK] 2024-12-04T06:52:34,317 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:34,317 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK], DatanodeInfoWithStorage[127.0.0.1:36459,DS-a4c173e6-3e64-45c5-bc38-e4bf87df7a95,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]) is bad. 2024-12-04T06:52:34,317 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741875_1058 2024-12-04T06:52:34,318 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK] 2024-12-04T06:52:34,320 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36459 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:34,320 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:44740 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741876_1059] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data6]'}, localName='127.0.0.1:34091', datanodeUuid='99e7a4f4-4680-4b9c-a6d5-cf6b801391c5', xmitsInProgress=0}:Exception transferring block BP-1601631713-172.17.0.2-1733295131567:blk_1073741876_1059 to mirror 127.0.0.1:36459 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:34,320 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34091,DS-bc4841fe-8345-4c74-873d-b57a5538e87b,DISK], DatanodeInfoWithStorage[127.0.0.1:36459,DS-a4c173e6-3e64-45c5-bc38-e4bf87df7a95,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36459,DS-a4c173e6-3e64-45c5-bc38-e4bf87df7a95,DISK]) is bad. 2024-12-04T06:52:34,320 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741876_1059 2024-12-04T06:52:34,320 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:44740 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741876_1059] {}] datanode.BlockReceiver(316): Block 1073741876 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-04T06:52:34,320 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:44740 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741876_1059] {}] datanode.DataXceiver(331): 127.0.0.1:34091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44740 dst: /127.0.0.1:34091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:34,320 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36459,DS-a4c173e6-3e64-45c5-bc38-e4bf87df7a95,DISK] 2024-12-04T06:52:34,321 WARN [IPC Server handler 3 on default port 41097 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-04T06:52:34,321 WARN [IPC Server handler 3 on default port 41097 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-04T06:52:34,321 WARN [IPC Server handler 3 on default port 41097 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-04T06:52:34,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741877_1060 (size=6027) 2024-12-04T06:52:34,437 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4ebcebff[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34091, datanodeUuid=99e7a4f4-4680-4b9c-a6d5-cf6b801391c5, infoPort=37195, infoSecurePort=0, ipcPort=37861, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567):Failed to transfer BP-1601631713-172.17.0.2-1733295131567:blk_1073741857_1040 to 127.0.0.1:36459 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:34,635 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:34,726 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/.tmp/info/9ef3086d3e1f40ee9e66f09332cb9b8e 2024-12-04T06:52:34,733 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/.tmp/info/9ef3086d3e1f40ee9e66f09332cb9b8e as hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/info/9ef3086d3e1f40ee9e66f09332cb9b8e 2024-12-04T06:52:34,738 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/info/9ef3086d3e1f40ee9e66f09332cb9b8e, entries=1, sequenceid=45, filesize=5.9 K 2024-12-04T06:52:34,739 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 6b5e5651a1c0539271423f1bc707b389 in 433ms, sequenceid=45, compaction requested=false 2024-12-04T06:52:34,739 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6b5e5651a1c0539271423f1bc707b389: 2024-12-04T06:52:34,739 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-12-04T06:52:34,739 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T06:52:34,739 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/info/33d97ff7cd70430c8ffb618a6cedf46f because midkey is the same as first or last row 2024-12-04T06:52:34,899 INFO [regionserver/607fd5c6574c:0.logRoller 
{}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:34,899 WARN [regionserver/607fd5c6574c:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-12-04T06:52:34,921 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T06:52:34,925 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T06:52:34,929 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T06:52:34,929 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T06:52:34,929 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-04T06:52:34,929 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7d04364e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/hadoop.log.dir/,AVAILABLE} 2024-12-04T06:52:34,930 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@568b1686{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T06:52:35,051 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3740407e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/java.io.tmpdir/jetty-localhost-43333-hadoop-hdfs-3_4_1-tests_jar-_-any-3189811523543106618/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:52:35,052 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2e234cf7{HTTP/1.1, (http/1.1)}{localhost:43333} 2024-12-04T06:52:35,052 INFO [Time-limited test {}] server.Server(415): Started @127577ms 2024-12-04T06:52:35,053 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-04T06:52:35,163 WARN [Thread-978 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-04T06:52:35,174 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa8dd5440fc907ea6 with lease ID 0x3cd9640a93ef8926: from storage DS-2f609111-e44f-4cdd-9e6d-8dc690e52386 node DatanodeRegistration(127.0.0.1:39643, datanodeUuid=068c6fc1-5d76-4592-a144-8a94d67736fb, infoPort=43901, infoSecurePort=0, ipcPort=38665, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-04T06:52:35,174 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa8dd5440fc907ea6 with lease ID 0x3cd9640a93ef8926: from storage DS-a550b0e7-8b53-489a-a0f2-0e01b9e8ef92 node DatanodeRegistration(127.0.0.1:39643, datanodeUuid=068c6fc1-5d76-4592-a144-8a94d67736fb, infoPort=43901, infoSecurePort=0, ipcPort=38665, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:52:35,781 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:36,438 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@407b0d80[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34091, datanodeUuid=99e7a4f4-4680-4b9c-a6d5-cf6b801391c5, infoPort=37195, infoSecurePort=0, ipcPort=37861, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567):Failed to transfer BP-1601631713-172.17.0.2-1733295131567:blk_1073741867_1050 to 127.0.0.1:35439 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
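The DirectoryScanner warning above (dfs.datanode.directoryscan.throttle.limit.ms.per.sec set above 1000 ms/sec, so the default of -1 is assumed) is driven by an ordinary Hadoop configuration key. A small, hypothetical sketch (not taken from this run; DirectoryScannerThrottleExample is an invented name) of setting the property to a value the scanner will accept:

// Hypothetical sketch: per the warning above, values over 1000 ms/sec are ignored and the
// default of -1 (throttling disabled) is assumed instead, so stay at or below 1000.
import org.apache.hadoop.conf.Configuration;

public final class DirectoryScannerThrottleExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", 1000);
    System.out.println(
        "throttle = " + conf.getInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", -1));
  }
}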
2024-12-04T06:52:36,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39643 is added to blk_1073741847_1030 (size=13133) 2024-12-04T06:52:36,635 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:36,899 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:37,438 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4ebcebff[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34091, datanodeUuid=99e7a4f4-4680-4b9c-a6d5-cf6b801391c5, infoPort=37195, infoSecurePort=0, ipcPort=37861, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567):Failed to transfer BP-1601631713-172.17.0.2-1733295131567:blk_1073741872_1055 to 127.0.0.1:35439 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:37,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39643 is added to blk_1073741877_1060 (size=6027) 2024-12-04T06:52:37,782 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:38,635 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:38,899 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:39,782 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:40,636 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:40,900 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:41,783 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:42,423 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-04T06:52:42,636 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:42,747 ERROR [FSHLog-0-hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData-prefix:607fd5c6574c,45843,1733295132451 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:42,747 WARN [FSHLog-0-hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData-prefix:607fd5c6574c,45843,1733295132451 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:42,747 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 607fd5c6574c%2C45843%2C1733295132451:(num 1733295132659) roll requested 2024-12-04T06:52:42,748 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C45843%2C1733295132451.1733295162747 2024-12-04T06:52:42,751 WARN [Thread-1001 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:42,752 WARN [Thread-1001 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK], DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]) is bad. 
2024-12-04T06:52:42,752 WARN [Thread-1001 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741878_1061 2024-12-04T06:52:42,752 WARN [Thread-1001 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK] 2024-12-04T06:52:42,757 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:42,757 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:42,757 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:42,757 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:42,757 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:42,758 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/WALs/607fd5c6574c,45843,1733295132451/607fd5c6574c%2C45843%2C1733295132451.1733295132659 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/WALs/607fd5c6574c,45843,1733295132451/607fd5c6574c%2C45843%2C1733295132451.1733295162747 2024-12-04T06:52:42,758 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:42,758 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T06:52:42,758 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/WALs/607fd5c6574c,45843,1733295132451/607fd5c6574c%2C45843%2C1733295132451.1733295132659 2024-12-04T06:52:42,759 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43901:43901),(127.0.0.1/127.0.0.1:37195:37195)] 2024-12-04T06:52:42,759 WARN [IPC Server handler 4 on default port 41097 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/WALs/607fd5c6574c,45843,1733295132451/607fd5c6574c%2C45843%2C1733295132451.1733295132659 has not been closed. Lease recovery is in progress. RecoveryId = 1063 for block blk_1073741830_1006 2024-12-04T06:52:42,759 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/WALs/607fd5c6574c,45843,1733295132451/607fd5c6574c%2C45843%2C1733295132451.1733295132659 is not closed yet, will try archiving it next time 2024-12-04T06:52:42,759 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/WALs/607fd5c6574c,45843,1733295132451/607fd5c6574c%2C45843%2C1733295132451.1733295132659 after 1ms 2024-12-04T06:52:42,900 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:43,783 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:44,901 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:45,192 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2108ed38 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1601631713-172.17.0.2-1733295131567:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:45609,null,null]) java.net.ConnectException: Call From 607fd5c6574c/172.17.0.2 to localhost:41113 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-04T06:52:45,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39643 is added to blk_1073741833_1019 (size=455) 2024-12-04T06:52:45,784 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:45,834 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295132956 to hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/oldWALs/607fd5c6574c%2C33613%2C1733295132530.1733295132956 2024-12-04T06:52:45,835 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295150853 to hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/oldWALs/607fd5c6574c%2C33613%2C1733295132530.1733295150853 2024-12-04T06:52:46,760 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/WALs/607fd5c6574c,45843,1733295132451/607fd5c6574c%2C45843%2C1733295132451.1733295132659 after 4002ms 2024-12-04T06:52:46,901 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:47,784 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:48,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741835_1011 (size=393) 2024-12-04T06:52:48,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741833_1019 (size=455) 2024-12-04T06:52:48,902 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:49,167 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@10e32bc0[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39643, datanodeUuid=068c6fc1-5d76-4592-a144-8a94d67736fb, infoPort=43901, infoSecurePort=0, ipcPort=38665, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567):Failed to transfer BP-1601631713-172.17.0.2-1733295131567:blk_1073741831_1007 to 127.0.0.1:35439 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T06:52:49,167 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@27a04a96[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39643, datanodeUuid=068c6fc1-5d76-4592-a144-8a94d67736fb, infoPort=43901, infoSecurePort=0, ipcPort=38665, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567):Failed to transfer BP-1601631713-172.17.0.2-1733295131567:blk_1073741829_1005 to 127.0.0.1:35439 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:49,784 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:50,806 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C33613%2C1733295132530.1733295170806 2024-12-04T06:52:50,810 WARN [Thread-1014 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1064 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T06:52:50,810 WARN [Thread-1014 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741880_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK], DatanodeInfoWithStorage[127.0.0.1:34091,DS-bc4841fe-8345-4c74-873d-b57a5538e87b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK]) is bad. 2024-12-04T06:52:50,810 WARN [Thread-1014 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741880_1064 2024-12-04T06:52:50,811 WARN [Thread-1014 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK] 2024-12-04T06:52:50,821 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:50,821 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:50,822 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:50,822 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:50,822 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:50,822 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295152882 with entries=13, filesize=11.81 KB; new WAL /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295170806 2024-12-04T06:52:50,823 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43901:43901),(127.0.0.1/127.0.0.1:37195:37195)] 2024-12-04T06:52:50,823 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.1733295152882 is not closed yet, will try archiving it next time 2024-12-04T06:52:50,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741862_1045 (size=12100) 2024-12-04T06:52:50,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33613 {}] regionserver.HRegion(8855): Flush requested on 6b5e5651a1c0539271423f1bc707b389 2024-12-04T06:52:50,834 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6b5e5651a1c0539271423f1bc707b389 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-04T06:52:50,842 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/.tmp/info/df7d2b6d61664309a2bbc9c14e42426b is 1080, key is row0013/info:/1733295170824/Put/seqid=0 2024-12-04T06:52:50,846 WARN [Thread-1020 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:50,846 WARN [Thread-1020 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741882_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK], DatanodeInfoWithStorage[127.0.0.1:39643,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK]) is bad. 2024-12-04T06:52:50,846 WARN [Thread-1020 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741882_1066 2024-12-04T06:52:50,847 WARN [Thread-1020 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK] 2024-12-04T06:52:50,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741883_1067 (size=11421) 2024-12-04T06:52:50,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39643 is added to blk_1073741883_1067 (size=11421) 2024-12-04T06:52:50,902 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:50,902 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-12-04T06:52:51,049 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-04T06:52:51,050 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-04T06:52:51,050 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T06:52:51,050 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:52:51,050 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:52:51,050 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-04T06:52:51,050 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-04T06:52:51,050 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1887686222, stopped=false 2024-12-04T06:52:51,050 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=607fd5c6574c,45843,1733295132451 2024-12-04T06:52:51,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45843-0x1017c3eb99a0000, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T06:52:51,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41491-0x1017c3eb99a0002, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T06:52:51,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33613-0x1017c3eb99a0001, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T06:52:51,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45843-0x1017c3eb99a0000, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:51,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41491-0x1017c3eb99a0002, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:51,053 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-04T06:52:51,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33613-0x1017c3eb99a0001, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:51,053 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-04T06:52:51,053 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T06:52:51,053 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:52:51,053 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '607fd5c6574c,33613,1733295132530' ***** 2024-12-04T06:52:51,053 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33613-0x1017c3eb99a0001, quorum=127.0.0.1:54010, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T06:52:51,053 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-04T06:52:51,053 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '607fd5c6574c,41491,1733295133720' ***** 2024-12-04T06:52:51,054 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-04T06:52:51,054 INFO [RS:0;607fd5c6574c:33613 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-04T06:52:51,054 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45843-0x1017c3eb99a0000, quorum=127.0.0.1:54010, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T06:52:51,054 INFO [RS:1;607fd5c6574c:41491 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-04T06:52:51,054 INFO [RS:1;607fd5c6574c:41491 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-04T06:52:51,054 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41491-0x1017c3eb99a0002, quorum=127.0.0.1:54010, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T06:52:51,054 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-04T06:52:51,054 INFO [RS:1;607fd5c6574c:41491 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-04T06:52:51,054 INFO [RS:1;607fd5c6574c:41491 {}] regionserver.HRegionServer(959): stopping server 607fd5c6574c,41491,1733295133720 2024-12-04T06:52:51,054 INFO [RS:1;607fd5c6574c:41491 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T06:52:51,054 INFO [RS:1;607fd5c6574c:41491 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;607fd5c6574c:41491. 
2024-12-04T06:52:51,054 DEBUG [RS:1;607fd5c6574c:41491 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T06:52:51,055 DEBUG [RS:1;607fd5c6574c:41491 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:52:51,055 INFO [RS:1;607fd5c6574c:41491 {}] regionserver.HRegionServer(976): stopping server 607fd5c6574c,41491,1733295133720; all regions closed. 2024-12-04T06:52:51,055 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:51,055 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:51,055 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:51,055 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:51,056 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:51,056 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:51,056 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:51,056 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 2024-12-04T06:52:51,057 WARN [IPC Server handler 1 on default port 41097 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 has not been closed. Lease recovery is in progress. RecoveryId = 1068 for block blk_1073741837_1013 2024-12-04T06:52:51,057 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 after 1ms 2024-12-04T06:52:51,260 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/.tmp/info/df7d2b6d61664309a2bbc9c14e42426b 2024-12-04T06:52:51,267 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/.tmp/info/df7d2b6d61664309a2bbc9c14e42426b as hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/info/df7d2b6d61664309a2bbc9c14e42426b 2024-12-04T06:52:51,273 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/info/df7d2b6d61664309a2bbc9c14e42426b, entries=6, sequenceid=55, filesize=11.2 K 2024-12-04T06:52:51,274 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=6.30 KB/6455 for 6b5e5651a1c0539271423f1bc707b389 in 440ms, sequenceid=55, compaction requested=true 2024-12-04T06:52:51,274 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6b5e5651a1c0539271423f1bc707b389: 2024-12-04T06:52:51,274 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.6 K, sizeToCheck=16.0 K 2024-12-04T06:52:51,275 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T06:52:51,275 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/info/33d97ff7cd70430c8ffb618a6cedf46f because midkey is the same as first or last row 2024-12-04T06:52:51,275 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-04T06:52:51,275 INFO [RS:0;607fd5c6574c:33613 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-04T06:52:51,275 INFO [RS:0;607fd5c6574c:33613 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-04T06:52:51,275 INFO [RS:0;607fd5c6574c:33613 {}] regionserver.HRegionServer(3091): Received CLOSE for 6b5e5651a1c0539271423f1bc707b389 2024-12-04T06:52:51,275 INFO [RS:0;607fd5c6574c:33613 {}] regionserver.HRegionServer(959): stopping server 607fd5c6574c,33613,1733295132530 2024-12-04T06:52:51,275 INFO [RS:0;607fd5c6574c:33613 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T06:52:51,275 INFO [RS:0;607fd5c6574c:33613 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;607fd5c6574c:33613. 2024-12-04T06:52:51,275 DEBUG [RS:0;607fd5c6574c:33613 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T06:52:51,275 DEBUG [RS:0;607fd5c6574c:33613 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:52:51,275 INFO [RS:0;607fd5c6574c:33613 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-04T06:52:51,275 INFO [RS:0;607fd5c6574c:33613 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-04T06:52:51,275 INFO [RS:0;607fd5c6574c:33613 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-04T06:52:51,276 INFO [RS:0;607fd5c6574c:33613 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-04T06:52:51,275 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 6b5e5651a1c0539271423f1bc707b389, disabling compactions & flushes 2024-12-04T06:52:51,276 INFO [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389. 2024-12-04T06:52:51,276 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389. 2024-12-04T06:52:51,276 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389. after waiting 0 ms 2024-12-04T06:52:51,276 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389. 2024-12-04T06:52:51,276 INFO [RS:0;607fd5c6574c:33613 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-04T06:52:51,276 DEBUG [RS:0;607fd5c6574c:33613 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 6b5e5651a1c0539271423f1bc707b389=TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389.} 2024-12-04T06:52:51,276 DEBUG [RS:0;607fd5c6574c:33613 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 6b5e5651a1c0539271423f1bc707b389 2024-12-04T06:52:51,276 INFO [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 6b5e5651a1c0539271423f1bc707b389 1/1 column families, dataSize=6.30 KB heapSize=7 KB 2024-12-04T06:52:51,276 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-04T06:52:51,276 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-04T06:52:51,276 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-04T06:52:51,276 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-04T06:52:51,276 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-04T06:52:51,276 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-12-04T06:52:51,277 ERROR [FSHLog-0-hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12-prefix:607fd5c6574c,33613,1733295132530.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:51,277 WARN [FSHLog-0-hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12-prefix:607fd5c6574c,33613,1733295132530.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:51,277 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 607fd5c6574c%2C33613%2C1733295132530.meta:.meta(num 1733295133523) roll requested 2024-12-04T06:52:51,277 INFO [regionserver/607fd5c6574c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C33613%2C1733295132530.meta.1733295171277.meta 2024-12-04T06:52:51,282 WARN [Thread-1029 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1069 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35439 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T06:52:51,282 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/.tmp/info/a9df57bf071449a49230af8385cb2e04 is 1080, key is row0018/info:/1733295170836/Put/seqid=0 2024-12-04T06:52:51,281 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:60608 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741884_1069] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data6]'}, localName='127.0.0.1:34091', datanodeUuid='99e7a4f4-4680-4b9c-a6d5-cf6b801391c5', xmitsInProgress=0}:Exception transferring block BP-1601631713-172.17.0.2-1733295131567:blk_1073741884_1069 to mirror 127.0.0.1:35439 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:51,282 WARN [Thread-1029 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741884_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34091,DS-bc4841fe-8345-4c74-873d-b57a5538e87b,DISK], DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK]) is bad. 2024-12-04T06:52:51,282 WARN [Thread-1029 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741884_1069 2024-12-04T06:52:51,282 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:60608 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741884_1069] {}] datanode.BlockReceiver(316): Block 1073741884 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-12-04T06:52:51,282 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:60608 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741884_1069] {}] datanode.DataXceiver(331): 127.0.0.1:34091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60608 dst: /127.0.0.1:34091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:51,282 WARN [Thread-1029 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK] 2024-12-04T06:52:51,285 WARN [Thread-1030 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1070 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35439 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:51,285 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:60624 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741885_1070] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data6]'}, localName='127.0.0.1:34091', datanodeUuid='99e7a4f4-4680-4b9c-a6d5-cf6b801391c5', xmitsInProgress=0}:Exception transferring block BP-1601631713-172.17.0.2-1733295131567:blk_1073741885_1070 to mirror 127.0.0.1:35439 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:51,285 WARN [Thread-1030 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741885_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34091,DS-bc4841fe-8345-4c74-873d-b57a5538e87b,DISK], DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK]) is bad. 2024-12-04T06:52:51,285 WARN [Thread-1030 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741885_1070 2024-12-04T06:52:51,285 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:60624 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741885_1070] {}] datanode.BlockReceiver(316): Block 1073741885 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-04T06:52:51,285 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:60624 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741885_1070] {}] datanode.DataXceiver(331): 127.0.0.1:34091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60624 dst: /127.0.0.1:34091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T06:52:51,286 WARN [Thread-1030 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK] 2024-12-04T06:52:51,290 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:51,290 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:51,290 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:51,290 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:51,290 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:51,291 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295171277.meta 2024-12-04T06:52:51,291 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:51,291 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45609,DS-9a13451c-d980-4ddd-a5b5-c8c45906d80c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
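The repeated "All datanodes [...] are bad. Aborting..." failures above come from the HDFS client's DataStreamer giving up on pipeline recovery once every datanode left in the write pipeline is marked bad. As a rough illustration only (not part of this test's configuration), the Java sketch below shows the standard client-side properties that influence that recovery path; the NameNode URI is copied from the log purely for context, and the chosen values are assumptions.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class PipelineRecoverySettingsSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Ask the client to try replacing a failed datanode during pipeline recovery.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        // DEFAULT only replaces when the pipeline is large enough; ALWAYS and NEVER are the alternatives.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // Best-effort keeps writing with the remaining datanodes instead of aborting if replacement fails.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        // URI taken from the log above; any reachable HDFS NameNode would do for this sketch.
        try (FileSystem fs = FileSystem.get(new URI("hdfs://localhost:41097"), conf)) {
            System.out.println("Connected to " + fs.getUri());
        }
    }
}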
2024-12-04T06:52:51,292 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43901:43901),(127.0.0.1/127.0.0.1:37195:37195)] 2024-12-04T06:52:51,292 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta is not closed yet, will try archiving it next time 2024-12-04T06:52:51,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741887_1072 (size=11421) 2024-12-04T06:52:51,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39643 is added to blk_1073741887_1072 (size=11421) 2024-12-04T06:52:51,293 INFO [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.30 KB at sequenceid=64 (bloomFilter=true), to=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/.tmp/info/a9df57bf071449a49230af8385cb2e04 2024-12-04T06:52:51,293 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta 2024-12-04T06:52:51,294 WARN [IPC Server handler 1 on default port 41097 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1073 for block blk_1073741834_1010 2024-12-04T06:52:51,294 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta after 1ms 2024-12-04T06:52:51,299 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/.tmp/info/a9df57bf071449a49230af8385cb2e04 as hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/info/a9df57bf071449a49230af8385cb2e04 2024-12-04T06:52:51,306 INFO [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/info/a9df57bf071449a49230af8385cb2e04, entries=6, sequenceid=64, filesize=11.2 K 2024-12-04T06:52:51,307 INFO [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~6.30 KB/6455, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 6b5e5651a1c0539271423f1bc707b389 in 31ms, sequenceid=64, compaction requested=true 2024-12-04T06:52:51,308 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/info/7b51ca98eee940d987c39d84eda40948, hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/info/f19a864c9c644dcba50f6d61e2fc3cbf, hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/info/1b2dd89566c04f9d8f935cb7ecbeac10] to archive 2024-12-04T06:52:51,309 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
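The "Recover lease on dfs file ..." / "Failed to recover lease, attempt=0 ..." entries above show HBase's RecoverLeaseFSUtils polling the NameNode until the old WAL file's lease is released and the file is closed. The sketch below is a minimal, illustrative version of that polling pattern using the public DistributedFileSystem API; the class name, path, and timeout are assumptions, not the actual HBase implementation.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
    /** Polls recoverLease()/isFileClosed() until the file is closed or the deadline passes. */
    static boolean recoverLease(DistributedFileSystem dfs, Path path, long timeoutMs) throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
            // recoverLease() returns true once the lease has been released and the file is closed.
            if (dfs.recoverLease(path) || dfs.isFileClosed(path)) {
                return true;
            }
            Thread.sleep(1000L); // simple fixed backoff; the real utility uses a growing retry interval
        }
        return false;
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical WAL path; the test above operates on the .meta WAL paths shown in the log.
        Path wal = new Path("hdfs://localhost:41097/user/jenkins/example-wal");
        try (FileSystem fs = wal.getFileSystem(conf)) {
            if (fs instanceof DistributedFileSystem) {
                System.out.println("closed=" + recoverLease((DistributedFileSystem) fs, wal, 60_000L));
            }
        }
    }
}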
2024-12-04T06:52:51,311 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/info/7b51ca98eee940d987c39d84eda40948 to hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/info/7b51ca98eee940d987c39d84eda40948 2024-12-04T06:52:51,312 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/hbase/meta/1588230740/.tmp/info/e4210dd9da024ecb800a874f62e29141 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389./info:regioninfo/1733295134201/Put/seqid=0 2024-12-04T06:52:51,313 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/info/f19a864c9c644dcba50f6d61e2fc3cbf to hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/info/f19a864c9c644dcba50f6d61e2fc3cbf 2024-12-04T06:52:51,315 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/info/1b2dd89566c04f9d8f935cb7ecbeac10 to hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/info/1b2dd89566c04f9d8f935cb7ecbeac10 2024-12-04T06:52:51,315 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=607fd5c6574c:45843 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-12-04T06:52:51,316 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [7b51ca98eee940d987c39d84eda40948=10347, f19a864c9c644dcba50f6d61e2fc3cbf=12506, 1b2dd89566c04f9d8f935cb7ecbeac10=6027] 2024-12-04T06:52:51,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39643 is added to blk_1073741888_1074 (size=7089) 2024-12-04T06:52:51,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741888_1074 (size=7089) 2024-12-04T06:52:51,320 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/hbase/meta/1588230740/.tmp/info/e4210dd9da024ecb800a874f62e29141 2024-12-04T06:52:51,321 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6b5e5651a1c0539271423f1bc707b389/recovered.edits/67.seqid, newMaxSeqId=67, maxSeqId=1 2024-12-04T06:52:51,322 INFO [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389. 2024-12-04T06:52:51,322 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 6b5e5651a1c0539271423f1bc707b389: Waiting for close lock at 1733295171275Running coprocessor pre-close hooks at 1733295171275Disabling compacts and flushes for region at 1733295171275Disabling writes for close at 1733295171276 (+1 ms)Obtaining lock to block concurrent updates at 1733295171276Preparing flush snapshotting stores in 6b5e5651a1c0539271423f1bc707b389 at 1733295171276Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389., syncing WAL and waiting on mvcc, flushsize=dataSize=6455, getHeapSize=7152, getOffHeapSize=0, getCellsCount=6 at 1733295171276Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389. at 1733295171277 (+1 ms)Flushing 6b5e5651a1c0539271423f1bc707b389/info: creating writer at 1733295171277Flushing 6b5e5651a1c0539271423f1bc707b389/info: appending metadata at 1733295171281 (+4 ms)Flushing 6b5e5651a1c0539271423f1bc707b389/info: closing flushed file at 1733295171281Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@666f4496: reopening flushed file at 1733295171299 (+18 ms)Finished flush of dataSize ~6.30 KB/6455, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 6b5e5651a1c0539271423f1bc707b389 in 31ms, sequenceid=64, compaction requested=true at 1733295171307 (+8 ms)Writing region close event to WAL at 1733295171317 (+10 ms)Running coprocessor post-close hooks at 1733295171322 (+5 ms)Closed at 1733295171322 2024-12-04T06:52:51,322 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733295133825.6b5e5651a1c0539271423f1bc707b389. 
2024-12-04T06:52:51,342 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/hbase/meta/1588230740/.tmp/ns/de80fc54f56b4bcba2c10fd30e7f77b6 is 43, key is default/ns:d/1733295133588/Put/seqid=0 2024-12-04T06:52:51,345 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:60676 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741889_1075] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data6]'}, localName='127.0.0.1:34091', datanodeUuid='99e7a4f4-4680-4b9c-a6d5-cf6b801391c5', xmitsInProgress=0}:Exception transferring block BP-1601631713-172.17.0.2-1733295131567:blk_1073741889_1075 to mirror 127.0.0.1:35439 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:51,345 WARN [Thread-1050 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1075 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35439 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:51,345 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:60676 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741889_1075] {}] datanode.BlockReceiver(316): Block 1073741889 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
2024-12-04T06:52:51,345 WARN [Thread-1050 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741889_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34091,DS-bc4841fe-8345-4c74-873d-b57a5538e87b,DISK], DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK]) is bad. 2024-12-04T06:52:51,345 WARN [Thread-1050 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741889_1075 2024-12-04T06:52:51,345 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1768527484_22 at /127.0.0.1:60676 [Receiving block BP-1601631713-172.17.0.2-1733295131567:blk_1073741889_1075] {}] datanode.DataXceiver(331): 127.0.0.1:34091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60676 dst: /127.0.0.1:34091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:51,346 WARN [Thread-1050 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK] 2024-12-04T06:52:51,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39643 is added to blk_1073741890_1076 (size=5153) 2024-12-04T06:52:51,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741890_1076 (size=5153) 2024-12-04T06:52:51,353 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/hbase/meta/1588230740/.tmp/ns/de80fc54f56b4bcba2c10fd30e7f77b6 2024-12-04T06:52:51,380 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/hbase/meta/1588230740/.tmp/table/fbad1a79eaa24faa9ea8f87a68856619 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1733295134215/Put/seqid=0 2024-12-04T06:52:51,382 WARN [Thread-1057 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1077 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:52:51,382 WARN [Thread-1057 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1601631713-172.17.0.2-1733295131567:blk_1073741891_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK], DatanodeInfoWithStorage[127.0.0.1:39643,DS-2f609111-e44f-4cdd-9e6d-8dc690e52386,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK]) is bad. 2024-12-04T06:52:51,382 WARN [Thread-1057 {}] hdfs.DataStreamer(1850): Abandoning BP-1601631713-172.17.0.2-1733295131567:blk_1073741891_1077 2024-12-04T06:52:51,383 WARN [Thread-1057 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35439,DS-7fba1358-0431-4d5e-b282-2c5ee8ff7f36,DISK] 2024-12-04T06:52:51,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741892_1078 (size=5424) 2024-12-04T06:52:51,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39643 is added to blk_1073741892_1078 (size=5424) 2024-12-04T06:52:51,389 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/hbase/meta/1588230740/.tmp/table/fbad1a79eaa24faa9ea8f87a68856619 2024-12-04T06:52:51,396 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/hbase/meta/1588230740/.tmp/info/e4210dd9da024ecb800a874f62e29141 as hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/hbase/meta/1588230740/info/e4210dd9da024ecb800a874f62e29141 2024-12-04T06:52:51,402 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/hbase/meta/1588230740/info/e4210dd9da024ecb800a874f62e29141, entries=10, sequenceid=11, filesize=6.9 K 2024-12-04T06:52:51,403 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/hbase/meta/1588230740/.tmp/ns/de80fc54f56b4bcba2c10fd30e7f77b6 as hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/hbase/meta/1588230740/ns/de80fc54f56b4bcba2c10fd30e7f77b6 2024-12-04T06:52:51,409 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/hbase/meta/1588230740/ns/de80fc54f56b4bcba2c10fd30e7f77b6, entries=2, sequenceid=11, filesize=5.0 K 2024-12-04T06:52:51,410 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/hbase/meta/1588230740/.tmp/table/fbad1a79eaa24faa9ea8f87a68856619 as hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/hbase/meta/1588230740/table/fbad1a79eaa24faa9ea8f87a68856619 2024-12-04T06:52:51,416 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/hbase/meta/1588230740/table/fbad1a79eaa24faa9ea8f87a68856619, entries=2, sequenceid=11, filesize=5.3 K 2024-12-04T06:52:51,417 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 141ms, sequenceid=11, compaction requested=false 2024-12-04T06:52:51,423 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-04T06:52:51,424 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-04T06:52:51,424 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-04T06:52:51,424 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733295171276Running coprocessor pre-close hooks at 1733295171276Disabling compacts and flushes for region at 1733295171276Disabling writes for close at 1733295171276Obtaining lock to block concurrent updates at 1733295171276Preparing flush snapshotting stores in 1588230740 at 1733295171276Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1733295171277 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733295171293 (+16 ms)Flushing 1588230740/info: creating writer at 1733295171293Flushing 1588230740/info: appending metadata at 1733295171312 (+19 ms)Flushing 1588230740/info: closing flushed file at 1733295171312Flushing 1588230740/ns: creating writer at 1733295171325 (+13 ms)Flushing 1588230740/ns: appending metadata at 1733295171341 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1733295171341Flushing 1588230740/table: creating writer at 1733295171360 (+19 ms)Flushing 1588230740/table: appending metadata at 1733295171380 (+20 ms)Flushing 1588230740/table: closing flushed file at 1733295171380Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6edd3f81: reopening flushed file at 1733295171395 (+15 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@45b23cc8: reopening flushed file at 1733295171402 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7d511d70: reopening flushed file at 1733295171409 (+7 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 141ms, sequenceid=11, compaction requested=false at 1733295171417 (+8 ms)Writing region close event to WAL at 1733295171419 (+2 ms)Running coprocessor post-close hooks at 1733295171424 (+5 ms)Closed at 1733295171424 2024-12-04T06:52:51,425 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-04T06:52:51,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39643 is added to blk_1073741862_1045 (size=12100) 2024-12-04T06:52:51,476 INFO [RS:0;607fd5c6574c:33613 {}] regionserver.HRegionServer(976): stopping server 607fd5c6574c,33613,1733295132530; all regions closed. 2024-12-04T06:52:51,477 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:51,477 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:51,477 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:51,477 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:51,478 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:51,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741886_1071 (size=825) 2024-12-04T06:52:51,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39643 is added to blk_1073741886_1071 (size=825) 2024-12-04T06:52:51,784 INFO [regionserver/607fd5c6574c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T06:52:51,808 INFO [regionserver/607fd5c6574c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-04T06:52:51,808 INFO [regionserver/607fd5c6574c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-04T06:52:51,875 INFO [regionserver/607fd5c6574c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-04T06:52:51,875 INFO [regionserver/607fd5c6574c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-04T06:52:52,763 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-04T06:52:52,764 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-04T06:52:52,764 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-04T06:52:52,804 INFO [regionserver/607fd5c6574c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T06:52:53,167 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@10e32bc0[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39643, datanodeUuid=068c6fc1-5d76-4592-a144-8a94d67736fb, infoPort=43901, infoSecurePort=0, 
ipcPort=38665, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567):Failed to transfer BP-1601631713-172.17.0.2-1733295131567:blk_1073741832_1008 to 127.0.0.1:35439 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:53,167 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@27a04a96[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39643, datanodeUuid=068c6fc1-5d76-4592-a144-8a94d67736fb, infoPort=43901, infoSecurePort=0, ipcPort=38665, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567):Failed to transfer BP-1601631713-172.17.0.2-1733295131567:blk_1073741836_1012 to 127.0.0.1:35439 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:53,617 INFO [master/607fd5c6574c:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-04T06:52:53,617 INFO [master/607fd5c6574c:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-04T06:52:54,166 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@27a04a96[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39643, datanodeUuid=068c6fc1-5d76-4592-a144-8a94d67736fb, infoPort=43901, infoSecurePort=0, ipcPort=38665, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567):Failed to transfer BP-1601631713-172.17.0.2-1733295131567:blk_1073741826_1002 to 127.0.0.1:35439 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:54,166 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@10e32bc0[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39643, datanodeUuid=068c6fc1-5d76-4592-a144-8a94d67736fb, infoPort=43901, infoSecurePort=0, ipcPort=38665, storageInfo=lv=-57;cid=testClusterID;nsid=2075415593;c=1733295131567):Failed to transfer BP-1601631713-172.17.0.2-1733295131567:blk_1073741828_1004 to 127.0.0.1:35439 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:52:55,058 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 after 4002ms 2024-12-04T06:52:55,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741827_1003 (size=196) 2024-12-04T06:52:55,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741825_1001 (size=7) 2024-12-04T06:52:55,196 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@5fa1d2c8 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1601631713-172.17.0.2-1733295131567:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:45609,null,null]) java.net.ConnectException: Call From 607fd5c6574c/172.17.0.2 to localhost:41113 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-12-04T06:52:55,295 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta after 4002ms 2024-12-04T06:52:56,056 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-04T06:52:56,059 DEBUG [RS:1;607fd5c6574c:41491 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/oldWALs 2024-12-04T06:52:56,059 INFO [RS:1;607fd5c6574c:41491 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 607fd5c6574c%2C41491%2C1733295133720:(num 1733295133925) 2024-12-04T06:52:56,059 DEBUG [RS:1;607fd5c6574c:41491 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:52:56,059 INFO [RS:1;607fd5c6574c:41491 {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T06:52:56,059 INFO [RS:1;607fd5c6574c:41491 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T06:52:56,059 INFO [RS:1;607fd5c6574c:41491 {}] hbase.ChoreService(370): Chore service for: regionserver/607fd5c6574c:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-04T06:52:56,059 INFO [RS:1;607fd5c6574c:41491 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-04T06:52:56,059 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-04T06:52:56,059 INFO [RS:1;607fd5c6574c:41491 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-04T06:52:56,059 INFO [RS:1;607fd5c6574c:41491 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-04T06:52:56,059 INFO [RS:1;607fd5c6574c:41491 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T06:52:56,060 INFO [RS:1;607fd5c6574c:41491 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41491 2024-12-04T06:52:56,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41491-0x1017c3eb99a0002, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/607fd5c6574c,41491,1733295133720 2024-12-04T06:52:56,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45843-0x1017c3eb99a0000, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T06:52:56,063 INFO [RS:1;607fd5c6574c:41491 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T06:52:56,063 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [607fd5c6574c,41491,1733295133720] 2024-12-04T06:52:56,064 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:52:56,065 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/607fd5c6574c,41491,1733295133720 already deleted, retry=false 2024-12-04T06:52:56,065 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 607fd5c6574c,41491,1733295133720 expired; onlineServers=1 2024-12-04T06:52:56,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41491-0x1017c3eb99a0002, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T06:52:56,164 INFO [RS:1;607fd5c6574c:41491 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-04T06:52:56,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41491-0x1017c3eb99a0002, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T06:52:56,164 INFO [RS:1;607fd5c6574c:41491 {}] regionserver.HRegionServer(1031): Exiting; stopping=607fd5c6574c,41491,1733295133720; zookeeper connection closed. 2024-12-04T06:52:56,165 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@69b71030 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@69b71030 2024-12-04T06:52:56,323 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:52:56,343 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:52:56,343 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:52:56,344 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:52:56,344 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:52:56,344 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:52:56,356 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:52:56,356 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:52:56,480 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-04T06:52:56,484 DEBUG [RS:0;607fd5c6574c:33613 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/oldWALs 2024-12-04T06:52:56,484 INFO [RS:0;607fd5c6574c:33613 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 607fd5c6574c%2C33613%2C1733295132530.meta:.meta(num 1733295171277) 2024-12-04T06:52:56,484 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:56,485 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:56,485 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:56,485 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:56,485 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:56,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741881_1065 (size=14682) 2024-12-04T06:52:56,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39643 is added to blk_1073741881_1065 (size=14682) 2024-12-04T06:52:56,490 DEBUG [RS:0;607fd5c6574c:33613 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/oldWALs 2024-12-04T06:52:56,490 INFO [RS:0;607fd5c6574c:33613 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 607fd5c6574c%2C33613%2C1733295132530:(num 1733295170806) 2024-12-04T06:52:56,490 DEBUG [RS:0;607fd5c6574c:33613 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:52:56,491 INFO [RS:0;607fd5c6574c:33613 {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T06:52:56,491 INFO [RS:0;607fd5c6574c:33613 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T06:52:56,491 INFO [RS:0;607fd5c6574c:33613 {}] hbase.ChoreService(370): Chore service for: regionserver/607fd5c6574c:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 
2024-12-04T06:52:56,491 INFO [RS:0;607fd5c6574c:33613 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T06:52:56,491 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-04T06:52:56,491 INFO [RS:0;607fd5c6574c:33613 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33613 2024-12-04T06:52:56,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45843-0x1017c3eb99a0000, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T06:52:56,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33613-0x1017c3eb99a0001, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/607fd5c6574c,33613,1733295132530 2024-12-04T06:52:56,495 INFO [RS:0;607fd5c6574c:33613 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T06:52:56,496 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [607fd5c6574c,33613,1733295132530] 2024-12-04T06:52:56,498 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/607fd5c6574c,33613,1733295132530 already deleted, retry=false 2024-12-04T06:52:56,498 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 607fd5c6574c,33613,1733295132530 expired; onlineServers=0 2024-12-04T06:52:56,498 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '607fd5c6574c,45843,1733295132451' ***** 2024-12-04T06:52:56,498 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-04T06:52:56,498 INFO [M:0;607fd5c6574c:45843 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T06:52:56,498 INFO [M:0;607fd5c6574c:45843 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T06:52:56,498 DEBUG [M:0;607fd5c6574c:45843 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-04T06:52:56,498 DEBUG [M:0;607fd5c6574c:45843 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-04T06:52:56,498 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-04T06:52:56,498 DEBUG [master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.small.0-1733295132747 {}] cleaner.HFileCleaner(306): Exit Thread[master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.small.0-1733295132747,5,FailOnTimeoutGroup] 2024-12-04T06:52:56,498 DEBUG [master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.large.0-1733295132747 {}] cleaner.HFileCleaner(306): Exit Thread[master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.large.0-1733295132747,5,FailOnTimeoutGroup] 2024-12-04T06:52:56,498 INFO [M:0;607fd5c6574c:45843 {}] hbase.ChoreService(370): Chore service for: master/607fd5c6574c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-04T06:52:56,498 INFO [M:0;607fd5c6574c:45843 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T06:52:56,498 DEBUG [M:0;607fd5c6574c:45843 {}] master.HMaster(1795): Stopping service threads 2024-12-04T06:52:56,499 INFO [M:0;607fd5c6574c:45843 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-04T06:52:56,499 INFO [M:0;607fd5c6574c:45843 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-04T06:52:56,499 INFO [M:0;607fd5c6574c:45843 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-04T06:52:56,499 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-04T06:52:56,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45843-0x1017c3eb99a0000, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-04T06:52:56,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45843-0x1017c3eb99a0000, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:56,500 DEBUG [M:0;607fd5c6574c:45843 {}] zookeeper.ZKUtil(347): master:45843-0x1017c3eb99a0000, quorum=127.0.0.1:54010, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-04T06:52:56,500 WARN [M:0;607fd5c6574c:45843 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-04T06:52:56,501 INFO [M:0;607fd5c6574c:45843 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/.lastflushedseqids 2024-12-04T06:52:56,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39643 is added to blk_1073741893_1079 (size=130) 2024-12-04T06:52:56,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741893_1079 (size=130) 2024-12-04T06:52:56,517 INFO [M:0;607fd5c6574c:45843 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-04T06:52:56,518 INFO [M:0;607fd5c6574c:45843 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-04T06:52:56,518 DEBUG [M:0;607fd5c6574c:45843 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-04T06:52:56,518 INFO [M:0;607fd5c6574c:45843 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:52:56,518 DEBUG [M:0;607fd5c6574c:45843 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:52:56,518 DEBUG [M:0;607fd5c6574c:45843 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-04T06:52:56,518 DEBUG [M:0;607fd5c6574c:45843 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:52:56,518 INFO [M:0;607fd5c6574c:45843 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-12-04T06:52:56,540 DEBUG [M:0;607fd5c6574c:45843 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c551b18839844db399165a062c292ac4 is 82, key is hbase:meta,,1/info:regioninfo/1733295133569/Put/seqid=0 2024-12-04T06:52:56,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39643 is added to blk_1073741894_1080 (size=5672) 2024-12-04T06:52:56,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741894_1080 (size=5672) 2024-12-04T06:52:56,547 INFO [M:0;607fd5c6574c:45843 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c551b18839844db399165a062c292ac4 2024-12-04T06:52:56,576 DEBUG [M:0;607fd5c6574c:45843 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a503ee6ee810423b8ac13cf33a3ecb0e is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733295134221/Put/seqid=0 2024-12-04T06:52:56,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741895_1081 (size=6255) 2024-12-04T06:52:56,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39643 is added to blk_1073741895_1081 (size=6255) 2024-12-04T06:52:56,582 INFO [M:0;607fd5c6574c:45843 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a503ee6ee810423b8ac13cf33a3ecb0e 2024-12-04T06:52:56,588 INFO [M:0;607fd5c6574c:45843 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a503ee6ee810423b8ac13cf33a3ecb0e 2024-12-04T06:52:56,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33613-0x1017c3eb99a0001, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T06:52:56,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33613-0x1017c3eb99a0001, quorum=127.0.0.1:54010, baseZNode=/hbase 
Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T06:52:56,597 INFO [RS:0;607fd5c6574c:33613 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-04T06:52:56,597 INFO [RS:0;607fd5c6574c:33613 {}] regionserver.HRegionServer(1031): Exiting; stopping=607fd5c6574c,33613,1733295132530; zookeeper connection closed. 2024-12-04T06:52:56,597 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@20f8fec3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@20f8fec3 2024-12-04T06:52:56,599 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-12-04T06:52:56,606 DEBUG [M:0;607fd5c6574c:45843 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8cb3c32a532c4cd5abbb187e863b64c3 is 69, key is 607fd5c6574c,33613,1733295132530/rs:state/1733295132783/Put/seqid=0 2024-12-04T06:52:56,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741896_1082 (size=5224) 2024-12-04T06:52:56,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39643 is added to blk_1073741896_1082 (size=5224) 2024-12-04T06:52:56,615 INFO [M:0;607fd5c6574c:45843 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8cb3c32a532c4cd5abbb187e863b64c3 2024-12-04T06:52:56,636 DEBUG [M:0;607fd5c6574c:45843 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a31bdf6e24374035b8bd3cc6de242b49 is 52, key is load_balancer_on/state:d/1733295133674/Put/seqid=0 2024-12-04T06:52:56,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39643 is added to blk_1073741897_1083 (size=5056) 2024-12-04T06:52:56,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741897_1083 (size=5056) 2024-12-04T06:52:56,643 INFO [M:0;607fd5c6574c:45843 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a31bdf6e24374035b8bd3cc6de242b49 2024-12-04T06:52:56,650 DEBUG [M:0;607fd5c6574c:45843 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c551b18839844db399165a062c292ac4 as hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c551b18839844db399165a062c292ac4 2024-12-04T06:52:56,657 INFO [M:0;607fd5c6574c:45843 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c551b18839844db399165a062c292ac4, entries=8, sequenceid=60, filesize=5.5 K 2024-12-04T06:52:56,658 DEBUG [M:0;607fd5c6574c:45843 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a503ee6ee810423b8ac13cf33a3ecb0e as hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a503ee6ee810423b8ac13cf33a3ecb0e 2024-12-04T06:52:56,664 INFO [M:0;607fd5c6574c:45843 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a503ee6ee810423b8ac13cf33a3ecb0e 2024-12-04T06:52:56,665 INFO [M:0;607fd5c6574c:45843 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a503ee6ee810423b8ac13cf33a3ecb0e, entries=6, sequenceid=60, filesize=6.1 K 2024-12-04T06:52:56,666 DEBUG [M:0;607fd5c6574c:45843 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8cb3c32a532c4cd5abbb187e863b64c3 as hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8cb3c32a532c4cd5abbb187e863b64c3 2024-12-04T06:52:56,672 INFO [M:0;607fd5c6574c:45843 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8cb3c32a532c4cd5abbb187e863b64c3, entries=2, sequenceid=60, filesize=5.1 K 2024-12-04T06:52:56,673 DEBUG [M:0;607fd5c6574c:45843 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a31bdf6e24374035b8bd3cc6de242b49 as hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a31bdf6e24374035b8bd3cc6de242b49 2024-12-04T06:52:56,679 INFO [M:0;607fd5c6574c:45843 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a31bdf6e24374035b8bd3cc6de242b49, entries=1, sequenceid=60, filesize=4.9 K 2024-12-04T06:52:56,680 INFO [M:0;607fd5c6574c:45843 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 162ms, sequenceid=60, compaction requested=false 2024-12-04T06:52:56,682 INFO [M:0;607fd5c6574c:45843 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-04T06:52:56,682 DEBUG [M:0;607fd5c6574c:45843 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733295176518Disabling compacts and flushes for region at 1733295176518Disabling writes for close at 1733295176518Obtaining lock to block concurrent updates at 1733295176518Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733295176518Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1733295176518Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733295176519 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733295176519Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733295176539 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733295176539Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733295176558 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733295176575 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733295176575Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733295176588 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733295176606 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733295176606Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733295176620 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733295176636 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733295176636Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5dc155c7: reopening flushed file at 1733295176648 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5a19d06: reopening flushed file at 1733295176657 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@39d1316e: reopening flushed file at 1733295176665 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@29e2a059: reopening flushed file at 1733295176672 (+7 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 162ms, sequenceid=60, compaction requested=false at 1733295176680 (+8 ms)Writing region close event to WAL at 1733295176682 (+2 ms)Closed at 1733295176682 2024-12-04T06:52:56,683 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:56,683 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:56,683 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:56,683 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:56,683 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:52:56,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741879_1062 (size=1045) 2024-12-04T06:52:56,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39643 is added to blk_1073741879_1062 (size=1045) 2024-12-04T06:52:56,687 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-04T06:52:56,687 INFO [M:0;607fd5c6574c:45843 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-04T06:52:56,687 INFO [M:0;607fd5c6574c:45843 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45843 2024-12-04T06:52:56,688 INFO [M:0;607fd5c6574c:45843 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T06:52:56,792 INFO [M:0;607fd5c6574c:45843 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-04T06:52:56,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45843-0x1017c3eb99a0000, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T06:52:56,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45843-0x1017c3eb99a0000, quorum=127.0.0.1:54010, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T06:52:56,795 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3740407e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:52:56,795 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2e234cf7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T06:52:56,795 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T06:52:56,795 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@568b1686{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T06:52:56,795 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7d04364e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/hadoop.log.dir/,STOPPED} 2024-12-04T06:52:56,797 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@1041bf09 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1601631713-172.17.0.2-1733295131567:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:45609,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:41113 , LocalHost:localPort 607fd5c6574c/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-12-04T06:52:56,798 WARN [BP-1601631713-172.17.0.2-1733295131567 heartbeating to localhost/127.0.0.1:41097 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T06:52:56,798 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-04T06:52:56,798 WARN [BP-1601631713-172.17.0.2-1733295131567 heartbeating to localhost/127.0.0.1:41097 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1601631713-172.17.0.2-1733295131567 (Datanode Uuid 068c6fc1-5d76-4592-a144-8a94d67736fb) service to localhost/127.0.0.1:41097 2024-12-04T06:52:56,798 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T06:52:56,799 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data3/current/BP-1601631713-172.17.0.2-1733295131567 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:52:56,799 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@41271277 {}] ipc.Client$Connection(956): Interrupted while trying for connection 2024-12-04T06:52:56,799 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data4/current/BP-1601631713-172.17.0.2-1733295131567 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:52:56,799 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@41271277 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1601631713-172.17.0.2-1733295131567:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:45609,null,null]) java.net.ConnectException: Call From 607fd5c6574c/172.17.0.2 to localhost:41113 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-12-04T06:52:56,799 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T06:52:56,799 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@41271277 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1601631713-172.17.0.2-1733295131567:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:39643,null,null], DatanodeInfoWithStorage[127.0.0.1:45609,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-1601631713-172.17.0.2-1733295131567 2024-12-04T06:52:56,807 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2047cbbb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:52:56,807 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2021586{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T06:52:56,807 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T06:52:56,807 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ff5703b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T06:52:56,808 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d0f4a9a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/hadoop.log.dir/,STOPPED} 2024-12-04T06:52:56,809 WARN [BP-1601631713-172.17.0.2-1733295131567 heartbeating to localhost/127.0.0.1:41097 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T06:52:56,809 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-04T06:52:56,809 WARN [BP-1601631713-172.17.0.2-1733295131567 heartbeating to localhost/127.0.0.1:41097 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1601631713-172.17.0.2-1733295131567 (Datanode Uuid 99e7a4f4-4680-4b9c-a6d5-cf6b801391c5) service to localhost/127.0.0.1:41097 2024-12-04T06:52:56,809 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T06:52:56,809 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data5/current/BP-1601631713-172.17.0.2-1733295131567 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:52:56,810 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/cluster_f6d651fb-028b-a687-d730-5cbd162f5d75/data/data6/current/BP-1601631713-172.17.0.2-1733295131567 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:52:56,810 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T06:52:56,815 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@62b96b7c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-04T06:52:56,816 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2fd186ec{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T06:52:56,816 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T06:52:56,816 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2305029e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T06:52:56,816 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@42b52d44{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/hadoop.log.dir/,STOPPED} 2024-12-04T06:52:56,824 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-04T06:52:56,853 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-04T06:52:56,858 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-04T06:52:56,862 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=156 (was 81) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41097 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f8d90befa48.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41097 from jenkins 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:42649 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41097 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42649 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41097 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:41097 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41097 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41097 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:41097 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f8d90befa48.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41097 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41097 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41097 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=448 (was 404) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=285 (was 161) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6609 (was 6551) - AvailableMemoryMB LEAK? - 2024-12-04T06:52:56,870 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=157, OpenFileDescriptor=448, MaxFileDescriptor=1048576, SystemLoadAverage=285, ProcessCount=11, AvailableMemoryMB=6609 2024-12-04T06:52:56,870 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-04T06:52:56,871 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/hadoop.log.dir so I do NOT create it in target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0 2024-12-04T06:52:56,871 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3746517b-7168-b890-1443-97f7d613219b/hadoop.tmp.dir so I do NOT create it in target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0 2024-12-04T06:52:56,871 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/cluster_334a1604-d43e-79e1-6c44-751b06f7fb4e, deleteOnExit=true 2024-12-04T06:52:56,871 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-04T06:52:56,871 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/test.cache.data in system properties and HBase conf 2024-12-04T06:52:56,871 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/hadoop.tmp.dir in system properties and HBase conf 2024-12-04T06:52:56,871 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/hadoop.log.dir in system properties and HBase conf 2024-12-04T06:52:56,871 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-04T06:52:56,871 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir 
to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-04T06:52:56,871 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-04T06:52:56,871 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-04T06:52:56,872 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-04T06:52:56,872 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-04T06:52:56,872 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-04T06:52:56,872 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T06:52:56,872 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-04T06:52:56,872 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-04T06:52:56,872 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T06:52:56,872 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T06:52:56,872 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-04T06:52:56,872 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): 
Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/nfs.dump.dir in system properties and HBase conf 2024-12-04T06:52:56,873 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/java.io.tmpdir in system properties and HBase conf 2024-12-04T06:52:56,873 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T06:52:56,873 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-04T06:52:56,873 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-04T06:52:56,876 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:52:56,876 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:52:56,877 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:52:56,877 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:52:56,877 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:52:56,878 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:52:56,881 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:52:56,881 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:52:56,881 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:52:56,883 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-04T06:52:56,886 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-04T06:52:56,979 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-04T06:52:56,984 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-04T06:52:56,989 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-04T06:52:56,989 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-04T06:52:56,989 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-04T06:52:56,993 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-04T06:52:56,994 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6dbadaea{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/hadoop.log.dir/,AVAILABLE}
2024-12-04T06:52:56,995 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@48d7ddcf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-04T06:52:57,065 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-12-04T06:52:57,124 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6495f923{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/java.io.tmpdir/jetty-localhost-37547-hadoop-hdfs-3_4_1-tests_jar-_-any-2206193899104029089/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-04T06:52:57,124 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1663c181{HTTP/1.1, (http/1.1)}{localhost:37547}
2024-12-04T06:52:57,124 INFO [Time-limited test {}] server.Server(415): Started @149649ms
2024-12-04T06:52:57,138 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-04T06:52:57,209 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-04T06:52:57,212 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-04T06:52:57,212 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-04T06:52:57,212 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-04T06:52:57,213 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-04T06:52:57,213 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3de05dd9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/hadoop.log.dir/,AVAILABLE}
2024-12-04T06:52:57,213 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5485abbd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-04T06:52:57,296 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-12-04T06:52:57,332 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@dd504c3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/java.io.tmpdir/jetty-localhost-43243-hadoop-hdfs-3_4_1-tests_jar-_-any-2895149720234623873/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-04T06:52:57,332 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@235b88f{HTTP/1.1, (http/1.1)}{localhost:43243}
2024-12-04T06:52:57,333 INFO [Time-limited test {}] server.Server(415): Started @149858ms
2024-12-04T06:52:57,334 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-04T06:52:57,363 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-04T06:52:57,366 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-04T06:52:57,367 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-04T06:52:57,367 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-04T06:52:57,367 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-04T06:52:57,368 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@179d1ca6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/hadoop.log.dir/,AVAILABLE}
2024-12-04T06:52:57,368 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2a99c341{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-04T06:52:57,437 WARN [Thread-1176 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/cluster_334a1604-d43e-79e1-6c44-751b06f7fb4e/data/data1/current/BP-364349617-172.17.0.2-1733295176903/current, will proceed with Du for space computation calculation,
2024-12-04T06:52:57,437 WARN [Thread-1177 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/cluster_334a1604-d43e-79e1-6c44-751b06f7fb4e/data/data2/current/BP-364349617-172.17.0.2-1733295176903/current, will proceed with Du for space computation calculation,
2024-12-04T06:52:57,471 WARN [Thread-1155 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec.
Assuming default value of -1 2024-12-04T06:52:57,474 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x38b74460f4ae2cf2 with lease ID 0x992fd9214c626579: Processing first storage report for DS-92862bca-953a-4443-8da2-bdbfb8aba738 from datanode DatanodeRegistration(127.0.0.1:44085, datanodeUuid=cbeeeff1-6991-455d-942c-a8ed47d0b9de, infoPort=39055, infoSecurePort=0, ipcPort=42695, storageInfo=lv=-57;cid=testClusterID;nsid=2126775024;c=1733295176903) 2024-12-04T06:52:57,474 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x38b74460f4ae2cf2 with lease ID 0x992fd9214c626579: from storage DS-92862bca-953a-4443-8da2-bdbfb8aba738 node DatanodeRegistration(127.0.0.1:44085, datanodeUuid=cbeeeff1-6991-455d-942c-a8ed47d0b9de, infoPort=39055, infoSecurePort=0, ipcPort=42695, storageInfo=lv=-57;cid=testClusterID;nsid=2126775024;c=1733295176903), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:52:57,474 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x38b74460f4ae2cf2 with lease ID 0x992fd9214c626579: Processing first storage report for DS-24ec8f24-3a06-49e6-b44e-4754fcd77d75 from datanode DatanodeRegistration(127.0.0.1:44085, datanodeUuid=cbeeeff1-6991-455d-942c-a8ed47d0b9de, infoPort=39055, infoSecurePort=0, ipcPort=42695, storageInfo=lv=-57;cid=testClusterID;nsid=2126775024;c=1733295176903) 2024-12-04T06:52:57,474 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x38b74460f4ae2cf2 with lease ID 0x992fd9214c626579: from storage DS-24ec8f24-3a06-49e6-b44e-4754fcd77d75 node DatanodeRegistration(127.0.0.1:44085, datanodeUuid=cbeeeff1-6991-455d-942c-a8ed47d0b9de, infoPort=39055, infoSecurePort=0, ipcPort=42695, storageInfo=lv=-57;cid=testClusterID;nsid=2126775024;c=1733295176903), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:52:57,502 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@43a454f0{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/java.io.tmpdir/jetty-localhost-37447-hadoop-hdfs-3_4_1-tests_jar-_-any-5688578944431451414/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:52:57,502 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4efda07f{HTTP/1.1, (http/1.1)}{localhost:37447} 2024-12-04T06:52:57,502 INFO [Time-limited test {}] server.Server(415): Started @150027ms 2024-12-04T06:52:57,504 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-04T06:52:57,611 WARN [Thread-1202 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/cluster_334a1604-d43e-79e1-6c44-751b06f7fb4e/data/data3/current/BP-364349617-172.17.0.2-1733295176903/current, will proceed with Du for space computation calculation, 2024-12-04T06:52:57,611 WARN [Thread-1203 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/cluster_334a1604-d43e-79e1-6c44-751b06f7fb4e/data/data4/current/BP-364349617-172.17.0.2-1733295176903/current, will proceed with Du for space computation calculation, 2024-12-04T06:52:57,629 WARN [Thread-1191 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-04T06:52:57,631 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x65ecbb2143af9bc with lease ID 0x992fd9214c62657a: Processing first storage report for DS-77675f3b-25be-416e-8f7f-41bfc2e1d8be from datanode DatanodeRegistration(127.0.0.1:46345, datanodeUuid=1ca9d8ab-597a-4dac-b687-393278660315, infoPort=32811, infoSecurePort=0, ipcPort=44629, storageInfo=lv=-57;cid=testClusterID;nsid=2126775024;c=1733295176903) 2024-12-04T06:52:57,631 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x65ecbb2143af9bc with lease ID 0x992fd9214c62657a: from storage DS-77675f3b-25be-416e-8f7f-41bfc2e1d8be node DatanodeRegistration(127.0.0.1:46345, datanodeUuid=1ca9d8ab-597a-4dac-b687-393278660315, infoPort=32811, infoSecurePort=0, ipcPort=44629, storageInfo=lv=-57;cid=testClusterID;nsid=2126775024;c=1733295176903), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:52:57,631 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x65ecbb2143af9bc with lease ID 0x992fd9214c62657a: Processing first storage report for DS-385daa33-8ce4-4883-bea0-86d9ff7ff308 from datanode DatanodeRegistration(127.0.0.1:46345, datanodeUuid=1ca9d8ab-597a-4dac-b687-393278660315, infoPort=32811, infoSecurePort=0, ipcPort=44629, storageInfo=lv=-57;cid=testClusterID;nsid=2126775024;c=1733295176903) 2024-12-04T06:52:57,632 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x65ecbb2143af9bc with lease ID 0x992fd9214c62657a: from storage DS-385daa33-8ce4-4883-bea0-86d9ff7ff308 node DatanodeRegistration(127.0.0.1:46345, datanodeUuid=1ca9d8ab-597a-4dac-b687-393278660315, infoPort=32811, infoSecurePort=0, ipcPort=44629, storageInfo=lv=-57;cid=testClusterID;nsid=2126775024;c=1733295176903), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:52:57,733 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0 2024-12-04T06:52:57,736 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/cluster_334a1604-d43e-79e1-6c44-751b06f7fb4e/zookeeper_0, clientPort=57132, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/cluster_334a1604-d43e-79e1-6c44-751b06f7fb4e/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/cluster_334a1604-d43e-79e1-6c44-751b06f7fb4e/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-04T06:52:57,737 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57132 2024-12-04T06:52:57,737 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:52:57,739 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:52:57,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44085 is added to blk_1073741825_1001 (size=7) 2024-12-04T06:52:57,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741825_1001 (size=7) 2024-12-04T06:52:57,756 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b with version=8 2024-12-04T06:52:57,756 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/hbase-staging 2024-12-04T06:52:57,758 INFO [Time-limited test {}] client.ConnectionUtils(128): master/607fd5c6574c:0 server-side Connection retries=45 2024-12-04T06:52:57,758 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T06:52:57,759 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T06:52:57,759 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T06:52:57,759 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T06:52:57,759 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T06:52:57,759 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-04T06:52:57,759 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T06:52:57,760 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34971 2024-12-04T06:52:57,761 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34971 connecting to ZooKeeper ensemble=127.0.0.1:57132 2024-12-04T06:52:57,768 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:349710x0, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T06:52:57,772 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34971-0x1017c3f6a970000 connected 2024-12-04T06:52:57,801 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:52:57,803 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:52:57,806 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34971-0x1017c3f6a970000, quorum=127.0.0.1:57132, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T06:52:57,806 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b, hbase.cluster.distributed=false 2024-12-04T06:52:57,808 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34971-0x1017c3f6a970000, quorum=127.0.0.1:57132, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T06:52:57,812 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34971 2024-12-04T06:52:57,812 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34971 2024-12-04T06:52:57,813 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34971 2024-12-04T06:52:57,813 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34971 2024-12-04T06:52:57,813 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34971 2024-12-04T06:52:57,829 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/607fd5c6574c:0 server-side Connection retries=45 2024-12-04T06:52:57,829 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T06:52:57,829 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T06:52:57,829 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T06:52:57,829 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T06:52:57,829 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T06:52:57,829 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-04T06:52:57,829 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T06:52:57,830 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46331 2024-12-04T06:52:57,831 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46331 connecting to ZooKeeper ensemble=127.0.0.1:57132 2024-12-04T06:52:57,832 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:52:57,834 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:52:57,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:463310x0, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T06:52:57,840 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46331-0x1017c3f6a970001 connected 2024-12-04T06:52:57,840 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46331-0x1017c3f6a970001, quorum=127.0.0.1:57132, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T06:52:57,840 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-04T06:52:57,842 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-04T06:52:57,842 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46331-0x1017c3f6a970001, quorum=127.0.0.1:57132, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-04T06:52:57,844 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46331-0x1017c3f6a970001, quorum=127.0.0.1:57132, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T06:52:57,844 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46331 2024-12-04T06:52:57,844 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46331 2024-12-04T06:52:57,845 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46331 2024-12-04T06:52:57,845 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46331 2024-12-04T06:52:57,845 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46331 
2024-12-04T06:52:57,862 DEBUG [M:0;607fd5c6574c:34971 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;607fd5c6574c:34971 2024-12-04T06:52:57,862 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/607fd5c6574c,34971,1733295177758 2024-12-04T06:52:57,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46331-0x1017c3f6a970001, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T06:52:57,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34971-0x1017c3f6a970000, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T06:52:57,873 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34971-0x1017c3f6a970000, quorum=127.0.0.1:57132, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/607fd5c6574c,34971,1733295177758 2024-12-04T06:52:57,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34971-0x1017c3f6a970000, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:57,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46331-0x1017c3f6a970001, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-04T06:52:57,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46331-0x1017c3f6a970001, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:57,888 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34971-0x1017c3f6a970000, quorum=127.0.0.1:57132, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-04T06:52:57,889 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/607fd5c6574c,34971,1733295177758 from backup master directory 2024-12-04T06:52:57,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46331-0x1017c3f6a970001, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T06:52:57,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34971-0x1017c3f6a970000, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/607fd5c6574c,34971,1733295177758 2024-12-04T06:52:57,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34971-0x1017c3f6a970000, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T06:52:57,897 WARN [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-04T06:52:57,897 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=607fd5c6574c,34971,1733295177758 2024-12-04T06:52:57,906 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/hbase.id] with ID: 7c3f8918-5ce8-43ff-896b-2ca777a16131 2024-12-04T06:52:57,906 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/.tmp/hbase.id 2024-12-04T06:52:57,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44085 is added to blk_1073741826_1002 (size=42) 2024-12-04T06:52:57,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741826_1002 (size=42) 2024-12-04T06:52:57,916 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/.tmp/hbase.id]:[hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/hbase.id] 2024-12-04T06:52:57,930 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:52:57,930 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-04T06:52:57,932 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-12-04T06:52:57,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34971-0x1017c3f6a970000, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:57,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46331-0x1017c3f6a970001, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:57,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44085 is added to blk_1073741827_1003 (size=196) 2024-12-04T06:52:57,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741827_1003 (size=196) 2024-12-04T06:52:57,949 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T06:52:57,950 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-04T06:52:57,952 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T06:52:57,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44085 is added to blk_1073741828_1004 (size=1189) 2024-12-04T06:52:57,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741828_1004 (size=1189) 2024-12-04T06:52:57,967 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/data/master/store 2024-12-04T06:52:57,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44085 is added to blk_1073741829_1005 (size=34) 2024-12-04T06:52:57,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741829_1005 (size=34) 2024-12-04T06:52:58,066 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:52:58,297 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:52:58,380 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:52:58,381 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-04T06:52:58,381 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:52:58,381 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-04T06:52:58,381 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-04T06:52:58,381 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:52:58,381 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:52:58,381 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733295178380Disabling compacts and flushes for region at 1733295178380Disabling writes for close at 1733295178381 (+1 ms)Writing region close event to WAL at 1733295178381Closed at 1733295178381 2024-12-04T06:52:58,382 WARN [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/data/master/store/.initializing 2024-12-04T06:52:58,382 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/WALs/607fd5c6574c,34971,1733295177758 2024-12-04T06:52:58,385 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=607fd5c6574c%2C34971%2C1733295177758, suffix=, logDir=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/WALs/607fd5c6574c,34971,1733295177758, archiveDir=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/oldWALs, maxLogs=10 2024-12-04T06:52:58,385 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C34971%2C1733295177758.1733295178385 2024-12-04T06:52:58,393 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/WALs/607fd5c6574c,34971,1733295177758/607fd5c6574c%2C34971%2C1733295177758.1733295178385 2024-12-04T06:52:58,398 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39055:39055),(127.0.0.1/127.0.0.1:32811:32811)] 2024-12-04T06:52:58,400 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-04T06:52:58,400 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:52:58,401 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:58,401 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:58,402 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:58,404 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-04T06:52:58,404 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:58,405 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:52:58,405 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:58,406 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-04T06:52:58,406 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:58,407 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T06:52:58,407 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:58,409 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-04T06:52:58,409 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:58,409 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T06:52:58,409 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:58,411 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-04T06:52:58,411 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:58,411 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T06:52:58,411 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:58,412 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:58,413 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:58,414 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:58,414 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:58,415 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-04T06:52:58,416 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:52:58,419 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T06:52:58,419 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=817161, jitterRate=0.03907410800457001}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-04T06:52:58,420 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733295178401Initializing all the Stores at 1733295178402 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295178402Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295178402Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295178402Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295178402Cleaning up temporary data from old regions at 1733295178414 (+12 ms)Region opened successfully at 1733295178420 (+6 ms) 2024-12-04T06:52:58,421 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-04T06:52:58,425 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3179426a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=607fd5c6574c/172.17.0.2:0 2024-12-04T06:52:58,426 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-04T06:52:58,426 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-04T06:52:58,426 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-04T06:52:58,427 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-04T06:52:58,427 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-04T06:52:58,428 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-04T06:52:58,428 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-04T06:52:58,430 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-04T06:52:58,431 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34971-0x1017c3f6a970000, quorum=127.0.0.1:57132, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-04T06:52:58,433 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-04T06:52:58,433 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-04T06:52:58,434 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34971-0x1017c3f6a970000, quorum=127.0.0.1:57132, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-04T06:52:58,437 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-04T06:52:58,438 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-04T06:52:58,438 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34971-0x1017c3f6a970000, quorum=127.0.0.1:57132, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-04T06:52:58,440 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-04T06:52:58,441 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34971-0x1017c3f6a970000, quorum=127.0.0.1:57132, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-04T06:52:58,442 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-04T06:52:58,445 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34971-0x1017c3f6a970000, quorum=127.0.0.1:57132, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-04T06:52:58,447 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-04T06:52:58,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34971-0x1017c3f6a970000, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T06:52:58,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46331-0x1017c3f6a970001, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T06:52:58,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46331-0x1017c3f6a970001, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:58,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34971-0x1017c3f6a970000, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-04T06:52:58,450 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=607fd5c6574c,34971,1733295177758, sessionid=0x1017c3f6a970000, setting cluster-up flag (Was=false) 2024-12-04T06:52:58,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34971-0x1017c3f6a970000, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:58,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46331-0x1017c3f6a970001, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:58,460 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-04T06:52:58,462 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=607fd5c6574c,34971,1733295177758 2024-12-04T06:52:58,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46331-0x1017c3f6a970001, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:58,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34971-0x1017c3f6a970000, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:58,471 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-04T06:52:58,473 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=607fd5c6574c,34971,1733295177758 2024-12-04T06:52:58,474 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-04T06:52:58,476 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-04T06:52:58,477 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-04T06:52:58,477 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-04T06:52:58,477 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 607fd5c6574c,34971,1733295177758 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-04T06:52:58,479 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/607fd5c6574c:0, corePoolSize=5, maxPoolSize=5 2024-12-04T06:52:58,479 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/607fd5c6574c:0, corePoolSize=5, maxPoolSize=5 2024-12-04T06:52:58,479 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/607fd5c6574c:0, corePoolSize=5, maxPoolSize=5 2024-12-04T06:52:58,479 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/607fd5c6574c:0, corePoolSize=5, maxPoolSize=5 2024-12-04T06:52:58,479 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/607fd5c6574c:0, corePoolSize=10, maxPoolSize=10 2024-12-04T06:52:58,479 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:58,479 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/607fd5c6574c:0, corePoolSize=2, maxPoolSize=2 2024-12-04T06:52:58,479 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:58,483 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733295208482 2024-12-04T06:52:58,483 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-04T06:52:58,483 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-04T06:52:58,483 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-04T06:52:58,483 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-04T06:52:58,483 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-04T06:52:58,483 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-04T06:52:58,483 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:58,483 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T06:52:58,483 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-04T06:52:58,483 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-04T06:52:58,484 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-04T06:52:58,484 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-04T06:52:58,484 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-04T06:52:58,484 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-04T06:52:58,485 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:58,485 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-04T06:52:58,489 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.large.0-1733295178484,5,FailOnTimeoutGroup] 2024-12-04T06:52:58,489 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.small.0-1733295178489,5,FailOnTimeoutGroup] 2024-12-04T06:52:58,489 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:58,490 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-04T06:52:58,490 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:58,490 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:58,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741831_1007 (size=1321) 2024-12-04T06:52:58,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44085 is added to blk_1073741831_1007 (size=1321) 2024-12-04T06:52:58,496 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-04T06:52:58,496 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b 2024-12-04T06:52:58,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741832_1008 (size=32) 2024-12-04T06:52:58,505 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44085 is added to blk_1073741832_1008 (size=32) 2024-12-04T06:52:58,505 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:52:58,506 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T06:52:58,508 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T06:52:58,508 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:58,508 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:52:58,508 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-04T06:52:58,509 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-04T06:52:58,509 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:58,510 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:52:58,510 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T06:52:58,511 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T06:52:58,511 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:58,512 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:52:58,512 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T06:52:58,513 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-04T06:52:58,513 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:58,513 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:52:58,513 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-04T06:52:58,514 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/data/hbase/meta/1588230740 2024-12-04T06:52:58,515 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/data/hbase/meta/1588230740 2024-12-04T06:52:58,516 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-04T06:52:58,516 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-04T06:52:58,516 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-04T06:52:58,517 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-04T06:52:58,519 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T06:52:58,519 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=864066, jitterRate=0.09871678054332733}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T06:52:58,520 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733295178505Initializing all the Stores at 1733295178506 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295178506Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295178506Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295178506Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295178506Cleaning up temporary data from old regions at 1733295178516 (+10 ms)Region opened successfully at 1733295178520 (+4 ms) 2024-12-04T06:52:58,520 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-04T06:52:58,520 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-04T06:52:58,520 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-04T06:52:58,520 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-04T06:52:58,520 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-04T06:52:58,521 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-04T06:52:58,521 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733295178520Disabling compacts and flushes for region at 1733295178520Disabling writes for close at 1733295178520Writing region close event to WAL at 1733295178521 (+1 ms)Closed at 1733295178521 2024-12-04T06:52:58,522 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T06:52:58,522 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-04T06:52:58,522 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-04T06:52:58,524 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-04T06:52:58,525 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-04T06:52:58,547 INFO [RS:0;607fd5c6574c:46331 {}] regionserver.HRegionServer(746): ClusterId : 7c3f8918-5ce8-43ff-896b-2ca777a16131 2024-12-04T06:52:58,548 DEBUG [RS:0;607fd5c6574c:46331 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-04T06:52:58,550 DEBUG [RS:0;607fd5c6574c:46331 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-04T06:52:58,550 DEBUG [RS:0;607fd5c6574c:46331 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-04T06:52:58,553 DEBUG [RS:0;607fd5c6574c:46331 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-04T06:52:58,554 DEBUG [RS:0;607fd5c6574c:46331 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65f1d2cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=607fd5c6574c/172.17.0.2:0 2024-12-04T06:52:58,565 DEBUG [RS:0;607fd5c6574c:46331 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;607fd5c6574c:46331 2024-12-04T06:52:58,566 INFO [RS:0;607fd5c6574c:46331 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-04T06:52:58,566 INFO [RS:0;607fd5c6574c:46331 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-04T06:52:58,566 DEBUG [RS:0;607fd5c6574c:46331 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-04T06:52:58,566 INFO [RS:0;607fd5c6574c:46331 {}] regionserver.HRegionServer(2659): reportForDuty to master=607fd5c6574c,34971,1733295177758 with port=46331, startcode=1733295177828 2024-12-04T06:52:58,567 DEBUG [RS:0;607fd5c6574c:46331 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-04T06:52:58,569 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33181, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-04T06:52:58,569 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34971 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 607fd5c6574c,46331,1733295177828 2024-12-04T06:52:58,569 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34971 {}] master.ServerManager(517): Registering regionserver=607fd5c6574c,46331,1733295177828 2024-12-04T06:52:58,571 DEBUG [RS:0;607fd5c6574c:46331 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b 2024-12-04T06:52:58,571 DEBUG [RS:0;607fd5c6574c:46331 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41687 2024-12-04T06:52:58,571 DEBUG [RS:0;607fd5c6574c:46331 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-04T06:52:58,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34971-0x1017c3f6a970000, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T06:52:58,573 DEBUG [RS:0;607fd5c6574c:46331 {}] zookeeper.ZKUtil(111): regionserver:46331-0x1017c3f6a970001, quorum=127.0.0.1:57132, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/607fd5c6574c,46331,1733295177828 2024-12-04T06:52:58,573 WARN [RS:0;607fd5c6574c:46331 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-04T06:52:58,573 INFO [RS:0;607fd5c6574c:46331 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T06:52:58,573 DEBUG [RS:0;607fd5c6574c:46331 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828 2024-12-04T06:52:58,573 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [607fd5c6574c,46331,1733295177828] 2024-12-04T06:52:58,579 INFO [RS:0;607fd5c6574c:46331 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-04T06:52:58,581 INFO [RS:0;607fd5c6574c:46331 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-04T06:52:58,581 INFO [RS:0;607fd5c6574c:46331 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-04T06:52:58,581 INFO [RS:0;607fd5c6574c:46331 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
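The MemStoreFlusher entry above reports a global memstore limit of 880 M with a low-water mark of 836 M, i.e. 95% of the limit. A minimal sketch, assuming the usual hbase.regionserver.global.memstore.size knobs are what drive these numbers (the fractions below are illustrative, not taken from this run's site configuration):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Fraction of the region server heap reserved for all memstores combined.
            conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
            // Forced flushing kicks in at this fraction of the limit
            // (0.95 of 880 M is the 836 M low mark logged above).
            conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
            System.out.println(conf.get("hbase.regionserver.global.memstore.size"));
        }
    }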
2024-12-04T06:52:58,582 INFO [RS:0;607fd5c6574c:46331 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-04T06:52:58,582 INFO [RS:0;607fd5c6574c:46331 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-04T06:52:58,583 INFO [RS:0;607fd5c6574c:46331 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:58,583 DEBUG [RS:0;607fd5c6574c:46331 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:58,583 DEBUG [RS:0;607fd5c6574c:46331 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:58,583 DEBUG [RS:0;607fd5c6574c:46331 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:58,583 DEBUG [RS:0;607fd5c6574c:46331 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:58,583 DEBUG [RS:0;607fd5c6574c:46331 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:58,583 DEBUG [RS:0;607fd5c6574c:46331 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/607fd5c6574c:0, corePoolSize=2, maxPoolSize=2 2024-12-04T06:52:58,583 DEBUG [RS:0;607fd5c6574c:46331 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:58,583 DEBUG [RS:0;607fd5c6574c:46331 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:58,583 DEBUG [RS:0;607fd5c6574c:46331 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:58,583 DEBUG [RS:0;607fd5c6574c:46331 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:58,583 DEBUG [RS:0;607fd5c6574c:46331 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:58,583 DEBUG [RS:0;607fd5c6574c:46331 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:52:58,583 DEBUG [RS:0;607fd5c6574c:46331 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/607fd5c6574c:0, corePoolSize=3, maxPoolSize=3 2024-12-04T06:52:58,583 DEBUG [RS:0;607fd5c6574c:46331 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0, corePoolSize=3, maxPoolSize=3 2024-12-04T06:52:58,584 INFO [RS:0;607fd5c6574c:46331 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
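The executor.ExecutorService entries above all report corePoolSize equal to maxPoolSize, so each named pool is effectively fixed-size. This sketch is not HBase's own ExecutorService wrapper; it only illustrates, with plain java.util.concurrent, what those two parameters mean for such a pool:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class ExecutorPoolSketch {
        public static void main(String[] args) {
            // corePoolSize == maxPoolSize, as in the RS_OPEN_REGION / RS_FLUSH_OPERATIONS
            // entries above: the pool never grows beyond its core threads, and extra
            // work simply queues.
            ThreadPoolExecutor pool = new ThreadPoolExecutor(
                3, 3, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
            pool.submit(() -> System.out.println("flush operation placeholder"));
            pool.shutdown();
        }
    }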
2024-12-04T06:52:58,584 INFO [RS:0;607fd5c6574c:46331 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:58,584 INFO [RS:0;607fd5c6574c:46331 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:58,584 INFO [RS:0;607fd5c6574c:46331 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:58,584 INFO [RS:0;607fd5c6574c:46331 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:58,584 INFO [RS:0;607fd5c6574c:46331 {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,46331,1733295177828-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T06:52:58,601 INFO [RS:0;607fd5c6574c:46331 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-04T06:52:58,601 INFO [RS:0;607fd5c6574c:46331 {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,46331,1733295177828-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:58,601 INFO [RS:0;607fd5c6574c:46331 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:58,602 INFO [RS:0;607fd5c6574c:46331 {}] regionserver.Replication(171): 607fd5c6574c,46331,1733295177828 started 2024-12-04T06:52:58,617 INFO [RS:0;607fd5c6574c:46331 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:58,617 INFO [RS:0;607fd5c6574c:46331 {}] regionserver.HRegionServer(1482): Serving as 607fd5c6574c,46331,1733295177828, RpcServer on 607fd5c6574c/172.17.0.2:46331, sessionid=0x1017c3f6a970001 2024-12-04T06:52:58,617 DEBUG [RS:0;607fd5c6574c:46331 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-04T06:52:58,617 DEBUG [RS:0;607fd5c6574c:46331 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 607fd5c6574c,46331,1733295177828 2024-12-04T06:52:58,617 DEBUG [RS:0;607fd5c6574c:46331 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '607fd5c6574c,46331,1733295177828' 2024-12-04T06:52:58,617 DEBUG [RS:0;607fd5c6574c:46331 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-04T06:52:58,618 DEBUG [RS:0;607fd5c6574c:46331 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-04T06:52:58,618 DEBUG [RS:0;607fd5c6574c:46331 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-04T06:52:58,618 DEBUG [RS:0;607fd5c6574c:46331 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-04T06:52:58,618 DEBUG [RS:0;607fd5c6574c:46331 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 607fd5c6574c,46331,1733295177828 2024-12-04T06:52:58,618 DEBUG [RS:0;607fd5c6574c:46331 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '607fd5c6574c,46331,1733295177828' 2024-12-04T06:52:58,618 DEBUG [RS:0;607fd5c6574c:46331 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-04T06:52:58,618 DEBUG 
[RS:0;607fd5c6574c:46331 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-04T06:52:58,619 DEBUG [RS:0;607fd5c6574c:46331 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-04T06:52:58,619 INFO [RS:0;607fd5c6574c:46331 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-04T06:52:58,619 INFO [RS:0;607fd5c6574c:46331 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-04T06:52:58,675 WARN [607fd5c6574c:34971 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-04T06:52:58,721 INFO [RS:0;607fd5c6574c:46331 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=607fd5c6574c%2C46331%2C1733295177828, suffix=, logDir=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828, archiveDir=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/oldWALs, maxLogs=32 2024-12-04T06:52:58,722 INFO [RS:0;607fd5c6574c:46331 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C46331%2C1733295177828.1733295178722 2024-12-04T06:52:58,729 INFO [RS:0;607fd5c6574c:46331 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295178722 2024-12-04T06:52:58,730 DEBUG [RS:0;607fd5c6574c:46331 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39055:39055),(127.0.0.1/127.0.0.1:32811:32811)] 2024-12-04T06:52:58,925 DEBUG [607fd5c6574c:34971 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-04T06:52:58,926 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=607fd5c6574c,46331,1733295177828 2024-12-04T06:52:58,928 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 607fd5c6574c,46331,1733295177828, state=OPENING 2024-12-04T06:52:58,929 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-04T06:52:58,933 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46331-0x1017c3f6a970001, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:58,933 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34971-0x1017c3f6a970000, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:52:58,934 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-04T06:52:58,934 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T06:52:58,934 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T06:52:58,934 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=607fd5c6574c,46331,1733295177828}] 2024-12-04T06:52:59,067 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	...
11 more 2024-12-04T06:52:59,087 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-04T06:52:59,089 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36009, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-04T06:52:59,093 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-04T06:52:59,093 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T06:52:59,095 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=607fd5c6574c%2C46331%2C1733295177828.meta, suffix=.meta, logDir=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828, archiveDir=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/oldWALs, maxLogs=32 2024-12-04T06:52:59,096 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C46331%2C1733295177828.meta.1733295179095.meta 2024-12-04T06:52:59,100 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.meta.1733295179095.meta 2024-12-04T06:52:59,102 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32811:32811),(127.0.0.1/127.0.0.1:39055:39055)] 2024-12-04T06:52:59,104 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-04T06:52:59,105 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-04T06:52:59,105 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-04T06:52:59,105 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
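The WAL configuration entries above report blocksize=256 MB, rollsize=128 MB and maxLogs=32 for the newly created FSHLog. A minimal sketch, assuming these come from the usual WAL rolling properties (hbase.regionserver.logroll.multiplier and hbase.regionserver.maxlogs; a 0.5 multiplier is what would yield a 128 MB roll size from a 256 MB block size):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Roll the WAL once it reaches blocksize * multiplier (256 MB * 0.5 = 128 MB here).
            conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
            // Cap the number of retained WAL files, matching maxLogs=32 in the log line above.
            conf.setInt("hbase.regionserver.maxlogs", 32);
            System.out.println("roll multiplier = " + conf.get("hbase.regionserver.logroll.multiplier"));
        }
    }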
2024-12-04T06:52:59,105 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-04T06:52:59,105 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:52:59,105 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-04T06:52:59,105 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-04T06:52:59,106 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T06:52:59,107 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T06:52:59,107 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:59,108 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:52:59,108 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-04T06:52:59,109 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-04T06:52:59,109 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:59,109 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:52:59,109 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T06:52:59,110 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T06:52:59,110 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:59,110 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:52:59,110 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T06:52:59,111 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-04T06:52:59,111 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:59,111 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
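The store-opener entries above mirror the hbase:meta column family settings dumped earlier: ROWCOL bloom filters, in-memory caching, 8 KB blocks and ROW_INDEX_V1 data block encoding. A minimal sketch, using the public descriptor builders, of how a column family with the same attributes could be declared for an ordinary table (the 'example' table and 'info' family names are illustrative):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeFamilySketch {
        public static void main(String[] args) {
            // Column family matching the attributes logged for hbase:meta's 'info' family.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(8 * 1024)
                .build();
            TableDescriptor table = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("example"))
                .setColumnFamily(info)
                .build();
            System.out.println(table);
        }
    }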
2024-12-04T06:52:59,112 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-04T06:52:59,112 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/data/hbase/meta/1588230740 2024-12-04T06:52:59,113 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/data/hbase/meta/1588230740 2024-12-04T06:52:59,115 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-04T06:52:59,115 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-04T06:52:59,115 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-04T06:52:59,117 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-04T06:52:59,117 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=706189, jitterRate=-0.1020352691411972}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T06:52:59,118 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-04T06:52:59,118 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733295179105Writing region info on filesystem at 1733295179105Initializing all the Stores at 1733295179106 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295179106Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295179106Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295179106Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295179106Cleaning up temporary data from old regions at 1733295179115 (+9 ms)Running coprocessor post-open hooks at 1733295179118 (+3 ms)Region opened successfully at 1733295179118 2024-12-04T06:52:59,119 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733295179087 2024-12-04T06:52:59,122 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-04T06:52:59,122 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-04T06:52:59,123 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=607fd5c6574c,46331,1733295177828 2024-12-04T06:52:59,124 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 607fd5c6574c,46331,1733295177828, state=OPEN 2024-12-04T06:52:59,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46331-0x1017c3f6a970001, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T06:52:59,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34971-0x1017c3f6a970000, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T06:52:59,134 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=607fd5c6574c,46331,1733295177828 2024-12-04T06:52:59,134 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T06:52:59,134 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T06:52:59,137 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-04T06:52:59,137 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=607fd5c6574c,46331,1733295177828 in 200 msec 2024-12-04T06:52:59,140 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-04T06:52:59,140 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 615 msec 2024-12-04T06:52:59,141 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T06:52:59,141 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-04T06:52:59,142 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-04T06:52:59,142 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=607fd5c6574c,46331,1733295177828, seqNum=-1] 2024-12-04T06:52:59,143 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T06:52:59,144 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34369, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T06:52:59,151 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 673 msec 2024-12-04T06:52:59,152 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733295179151, completionTime=-1 2024-12-04T06:52:59,152 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-04T06:52:59,152 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-04T06:52:59,153 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-04T06:52:59,153 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733295239153 2024-12-04T06:52:59,153 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733295299153 2024-12-04T06:52:59,154 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-04T06:52:59,154 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,34971,1733295177758-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:59,154 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,34971,1733295177758-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:59,154 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,34971,1733295177758-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:59,154 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-607fd5c6574c:34971, period=300000, unit=MILLISECONDS is enabled. 
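InitMetaProcedure reports creating the {NAME => 'default'} and {NAME => 'hbase'} namespaces as part of meta bootstrap. For comparison, a user namespace would be created through the Admin API; a minimal sketch (the connection setup and the 'demo_ns' name are illustrative only):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class NamespaceSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Namespaces group tables; 'default' and 'hbase' are created by the master itself.
                admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
            }
        }
    }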
2024-12-04T06:52:59,154 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:59,154 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-04T06:52:59,156 DEBUG [master/607fd5c6574c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-04T06:52:59,158 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.261sec 2024-12-04T06:52:59,159 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-04T06:52:59,159 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-04T06:52:59,159 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-04T06:52:59,159 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-04T06:52:59,159 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-04T06:52:59,159 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,34971,1733295177758-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T06:52:59,159 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,34971,1733295177758-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-04T06:52:59,162 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-04T06:52:59,162 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-04T06:52:59,162 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,34971,1733295177758-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
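SlowLogMasterService notes that slow/large request logging to the hbase:slowlog system table is disabled in this run. A minimal sketch of how it might be switched on, assuming the usual slowlog toggles are the ones this service checks (both property names below are assumptions, not taken from this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SlowLogTableSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Record slow/large RPCs in the in-memory ring buffer...
            conf.setBoolean("hbase.regionserver.slowlog.buffer.enabled", true);
            // ...and additionally persist them to the hbase:slowlog system table.
            conf.setBoolean("hbase.regionserver.slowlog.systable.enabled", true);
            System.out.println(conf.get("hbase.regionserver.slowlog.systable.enabled"));
        }
    }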
2024-12-04T06:52:59,248 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54dbaae8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T06:52:59,248 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 607fd5c6574c,34971,-1 for getting cluster id 2024-12-04T06:52:59,249 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-04T06:52:59,250 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c3f8918-5ce8-43ff-896b-2ca777a16131' 2024-12-04T06:52:59,251 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-04T06:52:59,251 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c3f8918-5ce8-43ff-896b-2ca777a16131" 2024-12-04T06:52:59,251 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79533b8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T06:52:59,251 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [607fd5c6574c,34971,-1] 2024-12-04T06:52:59,252 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-04T06:52:59,252 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:52:59,253 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40370, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-04T06:52:59,254 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68c19d09, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T06:52:59,255 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-04T06:52:59,256 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=607fd5c6574c,46331,1733295177828, seqNum=-1] 2024-12-04T06:52:59,256 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T06:52:59,258 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35280, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T06:52:59,259 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=607fd5c6574c,34971,1733295177758 2024-12-04T06:52:59,259 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:52:59,263 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-04T06:52:59,263 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-12-04T06:52:59,263 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-12-04T06:52:59,263 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-04T06:52:59,264 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 607fd5c6574c,34971,1733295177758 2024-12-04T06:52:59,264 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@78d066d6 2024-12-04T06:52:59,264 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-04T06:52:59,266 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40380, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-04T06:52:59,266 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34971 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-04T06:52:59,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34971 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
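The two TableDescriptorChecker warnings above show that this run deliberately uses a tiny region max file size (786432 bytes) and memstore flush size (8192 bytes) so that flushes and WAL rolls happen quickly during the test. A minimal sketch of how such a setup might be wired up follows; it assumes the values are applied to the test Configuration before the mini cluster starts (they could equally come from the table descriptor, as the warning text allows), and the class and method names are taken from the surrounding log and the public HBase test API rather than from this test's actual source.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class ForceFrequentRollsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Values copied from the warnings above; deliberately tiny to trigger frequent flushes and rolls.
        conf.setLong("hbase.hregion.max.filesize", 786432L);
        conf.setLong("hbase.hregion.memstore.flush.size", 8192L);
        HBaseTestingUtil util = new HBaseTestingUtil(conf);
        util.startMiniCluster(1);   // single region server, matching this run
        try {
          // ... test workload would go here ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }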
2024-12-04T06:52:59,267 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34971 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T06:52:59,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34971 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-12-04T06:52:59,270 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-12-04T06:52:59,270 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:59,270 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34971 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-12-04T06:52:59,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34971 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-04T06:52:59,271 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-04T06:52:59,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44085 is added to blk_1073741835_1011 (size=395) 2024-12-04T06:52:59,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741835_1011 (size=395) 2024-12-04T06:52:59,281 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => faaa1947176becac3336274ce5fdea47, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733295179266.faaa1947176becac3336274ce5fdea47.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b 2024-12-04T06:52:59,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741836_1012 (size=78) 2024-12-04T06:52:59,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44085 is added to blk_1073741836_1012 (size=78) 2024-12-04T06:52:59,288 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733295179266.faaa1947176becac3336274ce5fdea47.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:52:59,288 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing faaa1947176becac3336274ce5fdea47, disabling compactions & flushes 2024-12-04T06:52:59,288 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733295179266.faaa1947176becac3336274ce5fdea47. 2024-12-04T06:52:59,288 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733295179266.faaa1947176becac3336274ce5fdea47. 2024-12-04T06:52:59,288 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733295179266.faaa1947176becac3336274ce5fdea47. after waiting 0 ms 2024-12-04T06:52:59,288 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733295179266.faaa1947176becac3336274ce5fdea47. 2024-12-04T06:52:59,288 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733295179266.faaa1947176becac3336274ce5fdea47. 2024-12-04T06:52:59,288 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for faaa1947176becac3336274ce5fdea47: Waiting for close lock at 1733295179288Disabling compacts and flushes for region at 1733295179288Disabling writes for close at 1733295179288Writing region close event to WAL at 1733295179288Closed at 1733295179288 2024-12-04T06:52:59,289 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-12-04T06:52:59,290 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1733295179266.faaa1947176becac3336274ce5fdea47.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1733295179289"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733295179289"}]},"ts":"1733295179289"} 2024-12-04T06:52:59,292 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
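The create request logged at 06:52:59,267 carries a single 'info' column family with VERSIONS => '1' and otherwise default attributes. For reference, a hedged sketch of an equivalent client-side call is shown below as a fragment continuing the previous sketch (it reuses that Configuration as conf); it is illustrative only, not the test's actual code.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Equivalent of the logged request:
    // create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', VERSIONS => '1', ...}
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart");
      admin.createTable(TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)   // VERSIONS => '1' in the logged descriptor
              .build())
          .build());
    }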
2024-12-04T06:52:59,293 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-04T06:52:59,293 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733295179293"}]},"ts":"1733295179293"} 2024-12-04T06:52:59,295 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-12-04T06:52:59,295 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=faaa1947176becac3336274ce5fdea47, ASSIGN}] 2024-12-04T06:52:59,297 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=faaa1947176becac3336274ce5fdea47, ASSIGN 2024-12-04T06:52:59,297 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:52:59,298 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=faaa1947176becac3336274ce5fdea47, ASSIGN; state=OFFLINE, location=607fd5c6574c,46331,1733295177828; forceNewPlan=false, retain=false 2024-12-04T06:52:59,448 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=faaa1947176becac3336274ce5fdea47, regionState=OPENING, regionLocation=607fd5c6574c,46331,1733295177828 2024-12-04T06:52:59,451 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=faaa1947176becac3336274ce5fdea47, ASSIGN because future has completed 2024-12-04T06:52:59,452 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure faaa1947176becac3336274ce5fdea47, server=607fd5c6574c,46331,1733295177828}] 2024-12-04T06:52:59,613 INFO [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1733295179266.faaa1947176becac3336274ce5fdea47. 2024-12-04T06:52:59,613 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => faaa1947176becac3336274ce5fdea47, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733295179266.faaa1947176becac3336274ce5fdea47.', STARTKEY => '', ENDKEY => ''} 2024-12-04T06:52:59,614 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart faaa1947176becac3336274ce5fdea47 2024-12-04T06:52:59,614 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733295179266.faaa1947176becac3336274ce5fdea47.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:52:59,614 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for faaa1947176becac3336274ce5fdea47 2024-12-04T06:52:59,614 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for faaa1947176becac3336274ce5fdea47 2024-12-04T06:52:59,616 INFO [StoreOpener-faaa1947176becac3336274ce5fdea47-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region faaa1947176becac3336274ce5fdea47 2024-12-04T06:52:59,618 INFO [StoreOpener-faaa1947176becac3336274ce5fdea47-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region faaa1947176becac3336274ce5fdea47 columnFamilyName info 2024-12-04T06:52:59,618 DEBUG [StoreOpener-faaa1947176becac3336274ce5fdea47-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:52:59,618 INFO [StoreOpener-faaa1947176becac3336274ce5fdea47-1 {}] regionserver.HStore(327): Store=faaa1947176becac3336274ce5fdea47/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T06:52:59,618 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for faaa1947176becac3336274ce5fdea47 2024-12-04T06:52:59,619 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/data/default/TestLogRolling-testLogRollOnPipelineRestart/faaa1947176becac3336274ce5fdea47 2024-12-04T06:52:59,620 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/data/default/TestLogRolling-testLogRollOnPipelineRestart/faaa1947176becac3336274ce5fdea47 2024-12-04T06:52:59,620 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for faaa1947176becac3336274ce5fdea47 2024-12-04T06:52:59,621 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for faaa1947176becac3336274ce5fdea47 2024-12-04T06:52:59,623 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for faaa1947176becac3336274ce5fdea47 2024-12-04T06:52:59,627 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/data/default/TestLogRolling-testLogRollOnPipelineRestart/faaa1947176becac3336274ce5fdea47/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T06:52:59,631 INFO [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened faaa1947176becac3336274ce5fdea47; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=736276, jitterRate=-0.06377696990966797}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-04T06:52:59,631 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for faaa1947176becac3336274ce5fdea47 2024-12-04T06:52:59,632 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for faaa1947176becac3336274ce5fdea47: Running coprocessor pre-open hook at 1733295179614Writing region info on filesystem at 1733295179614Initializing all the Stores at 1733295179615 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295179615Cleaning up temporary data from old regions at 1733295179621 (+6 ms)Running coprocessor post-open hooks at 1733295179631 (+10 ms)Region opened successfully at 1733295179632 (+1 ms) 2024-12-04T06:52:59,633 INFO [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1733295179266.faaa1947176becac3336274ce5fdea47., pid=6, masterSystemTime=1733295179608 2024-12-04T06:52:59,636 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1733295179266.faaa1947176becac3336274ce5fdea47. 2024-12-04T06:52:59,636 INFO [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1733295179266.faaa1947176becac3336274ce5fdea47. 
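Interleaved with the table creation, a Close-WAL-Writer thread keeps retrying lease recovery (roughly once per second per file) on two WAL files under hdfs://localhost:41097/..., which appear to belong to an earlier mini cluster started by this test class; each probe fails with "Filesystem closed" because that cluster's DFS client has already been shut down. As the stack traces show, HBase's RecoverLeaseFSUtils invokes DistributedFileSystem.isFileClosed reflectively; the loop below is only a simplified, hypothetical sketch of that recover-then-poll pattern using the public HDFS API (dfs and walPath are placeholders), not the actual implementation.

    // Simplified recover-then-poll sketch; dfs is an org.apache.hadoop.hdfs.DistributedFileSystem
    // and walPath an org.apache.hadoop.fs.Path for the WAL file (both assumed to be in scope).
    boolean recovered = dfs.recoverLease(walPath);
    while (!recovered) {
      Thread.sleep(1000);                        // the log shows roughly one retry per second
      try {
        recovered = dfs.isFileClosed(walPath);   // fails with "Filesystem closed" once the owning client is shut down
      } catch (java.io.IOException e) {
        // logged above as "Failed invocation for <wal path>", then retried
      }
    }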
2024-12-04T06:52:59,637 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=faaa1947176becac3336274ce5fdea47, regionState=OPEN, openSeqNum=2, regionLocation=607fd5c6574c,46331,1733295177828 2024-12-04T06:52:59,640 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure faaa1947176becac3336274ce5fdea47, server=607fd5c6574c,46331,1733295177828 because future has completed 2024-12-04T06:52:59,645 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-04T06:52:59,645 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure faaa1947176becac3336274ce5fdea47, server=607fd5c6574c,46331,1733295177828 in 190 msec 2024-12-04T06:52:59,648 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-04T06:52:59,648 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=faaa1947176becac3336274ce5fdea47, ASSIGN in 350 msec 2024-12-04T06:52:59,649 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-04T06:52:59,650 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733295179649"}]},"ts":"1733295179649"} 2024-12-04T06:52:59,651 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-12-04T06:52:59,653 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-12-04T06:52:59,655 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 386 msec 2024-12-04T06:53:00,067 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:00,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:01,068 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:01,299 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:02,069 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:02,299 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:53:02,763 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-04T06:53:02,763 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-04T06:53:02,764 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-04T06:53:02,764 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-12-04T06:53:02,764 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-04T06:53:02,764 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-04T06:53:03,070 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:03,300 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:04,070 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:04,300 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:04,607 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-04T06:53:04,624 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:04,624 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:04,625 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:04,625 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:04,625 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:04,626 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:04,629 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:04,629 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:04,629 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:04,631 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:04,636 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-04T06:53:04,637 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-12-04T06:53:05,071 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:05,301 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:06,072 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:06,302 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:07,073 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:07,302 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:08,073 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:08,303 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:09,074 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:09,303 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:09,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34971 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-04T06:53:09,339 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-12-04T06:53:09,339 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-12-04T06:53:09,342 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-12-04T06:53:09,342 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1733295179266.faaa1947176becac3336274ce5fdea47. 2024-12-04T06:53:09,346 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1733295179266.faaa1947176becac3336274ce5fdea47., hostname=607fd5c6574c,46331,1733295177828, seqNum=2] 2024-12-04T06:53:10,075 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:10,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:11,075 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:11,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:11,349 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295178722 2024-12-04T06:53:11,350 WARN [ResponseProcessor for block BP-364349617-172.17.0.2-1733295176903:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-364349617-172.17.0.2-1733295176903:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:53:11,350 WARN [ResponseProcessor for block BP-364349617-172.17.0.2-1733295176903:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-364349617-172.17.0.2-1733295176903:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-364349617-172.17.0.2-1733295176903:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:46345,DS-77675f3b-25be-416e-8f7f-41bfc2e1d8be,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T06:53:11,350 WARN [DataStreamer for file /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295178722 block BP-364349617-172.17.0.2-1733295176903:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-364349617-172.17.0.2-1733295176903:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44085,DS-92862bca-953a-4443-8da2-bdbfb8aba738,DISK], DatanodeInfoWithStorage[127.0.0.1:46345,DS-77675f3b-25be-416e-8f7f-41bfc2e1d8be,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46345,DS-77675f3b-25be-416e-8f7f-41bfc2e1d8be,DISK]) is bad. 2024-12-04T06:53:11,350 WARN [PacketResponder: BP-364349617-172.17.0.2-1733295176903:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:46345] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:53:11,351 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_53709073_22 at /127.0.0.1:48954 [Receiving block BP-364349617-172.17.0.2-1733295176903:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44085:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48954 dst: /127.0.0.1:44085 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:53:11,351 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_53709073_22 at /127.0.0.1:46762 [Receiving block BP-364349617-172.17.0.2-1733295176903:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:46345:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46762 dst: /127.0.0.1:46345 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T06:53:11,352 WARN [DataStreamer for file /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.meta.1733295179095.meta block BP-364349617-172.17.0.2-1733295176903:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-364349617-172.17.0.2-1733295176903:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46345,DS-77675f3b-25be-416e-8f7f-41bfc2e1d8be,DISK], DatanodeInfoWithStorage[127.0.0.1:44085,DS-92862bca-953a-4443-8da2-bdbfb8aba738,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46345,DS-77675f3b-25be-416e-8f7f-41bfc2e1d8be,DISK]) is bad. 2024-12-04T06:53:11,353 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_53709073_22 at /127.0.0.1:46766 [Receiving block BP-364349617-172.17.0.2-1733295176903:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46345:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46766 dst: /127.0.0.1:46345 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T06:53:11,353 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_53709073_22 at /127.0.0.1:48964 [Receiving block BP-364349617-172.17.0.2-1733295176903:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44085:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48964 dst: /127.0.0.1:44085 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:53:11,354 WARN [ResponseProcessor for block BP-364349617-172.17.0.2-1733295176903:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-364349617-172.17.0.2-1733295176903:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-364349617-172.17.0.2-1733295176903:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:46345,DS-77675f3b-25be-416e-8f7f-41bfc2e1d8be,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:53:11,354 WARN [DataStreamer for file /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/WALs/607fd5c6574c,34971,1733295177758/607fd5c6574c%2C34971%2C1733295177758.1733295178385 block BP-364349617-172.17.0.2-1733295176903:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-364349617-172.17.0.2-1733295176903:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44085,DS-92862bca-953a-4443-8da2-bdbfb8aba738,DISK], DatanodeInfoWithStorage[127.0.0.1:46345,DS-77675f3b-25be-416e-8f7f-41bfc2e1d8be,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46345,DS-77675f3b-25be-416e-8f7f-41bfc2e1d8be,DISK]) is bad. 2024-12-04T06:53:11,355 WARN [PacketResponder: BP-364349617-172.17.0.2-1733295176903:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:46345] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] 
at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:53:11,356 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1921710488_22 at /127.0.0.1:46742 [Receiving block BP-364349617-172.17.0.2-1733295176903:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46345:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46742 dst: /127.0.0.1:46345 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:53:11,356 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1921710488_22 at /127.0.0.1:48934 [Receiving block BP-364349617-172.17.0.2-1733295176903:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44085:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48934 dst: /127.0.0.1:44085 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T06:53:11,374 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@43a454f0{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:53:11,374 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4efda07f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T06:53:11,374 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T06:53:11,374 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2a99c341{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T06:53:11,375 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@179d1ca6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/hadoop.log.dir/,STOPPED} 2024-12-04T06:53:11,376 WARN [BP-364349617-172.17.0.2-1733295176903 heartbeating to localhost/127.0.0.1:41687 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T06:53:11,376 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-04T06:53:11,376 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T06:53:11,376 WARN [BP-364349617-172.17.0.2-1733295176903 heartbeating to localhost/127.0.0.1:41687 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-364349617-172.17.0.2-1733295176903 (Datanode Uuid 1ca9d8ab-597a-4dac-b687-393278660315) service to localhost/127.0.0.1:41687 2024-12-04T06:53:11,377 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/cluster_334a1604-d43e-79e1-6c44-751b06f7fb4e/data/data3/current/BP-364349617-172.17.0.2-1733295176903 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:53:11,377 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/cluster_334a1604-d43e-79e1-6c44-751b06f7fb4e/data/data4/current/BP-364349617-172.17.0.2-1733295176903 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:53:11,377 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T06:53:11,389 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T06:53:11,391 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T06:53:11,392 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T06:53:11,392 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T06:53:11,392 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T06:53:11,393 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@27e9dc43{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/hadoop.log.dir/,AVAILABLE} 2024-12-04T06:53:11,393 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@32ef6c57{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T06:53:11,513 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2ac8bbe4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/java.io.tmpdir/jetty-localhost-34941-hadoop-hdfs-3_4_1-tests_jar-_-any-6324389497864726434/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:53:11,513 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@64d3fc67{HTTP/1.1, (http/1.1)}{localhost:34941} 2024-12-04T06:53:11,513 INFO [Time-limited test {}] server.Server(415): Started @164038ms 2024-12-04T06:53:11,515 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-04T06:53:11,539 WARN [ResponseProcessor for block BP-364349617-172.17.0.2-1733295176903:blk_1073741834_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-364349617-172.17.0.2-1733295176903:blk_1073741834_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:53:11,539 WARN [ResponseProcessor for block BP-364349617-172.17.0.2-1733295176903:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-364349617-172.17.0.2-1733295176903:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:53:11,539 WARN [ResponseProcessor for block BP-364349617-172.17.0.2-1733295176903:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-364349617-172.17.0.2-1733295176903:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:53:11,540 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_53709073_22 at /127.0.0.1:46458 [Receiving block BP-364349617-172.17.0.2-1733295176903:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44085:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46458 dst: /127.0.0.1:44085 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T06:53:11,540 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_53709073_22 at /127.0.0.1:46456 [Receiving block BP-364349617-172.17.0.2-1733295176903:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44085:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46456 dst: /127.0.0.1:44085 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:53:11,540 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1921710488_22 at /127.0.0.1:46464 [Receiving block BP-364349617-172.17.0.2-1733295176903:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44085:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46464 dst: /127.0.0.1:44085 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:53:11,546 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@dd504c3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:53:11,546 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@235b88f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T06:53:11,546 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T06:53:11,547 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5485abbd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T06:53:11,547 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3de05dd9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/hadoop.log.dir/,STOPPED} 2024-12-04T06:53:11,548 WARN [BP-364349617-172.17.0.2-1733295176903 heartbeating to localhost/127.0.0.1:41687 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T06:53:11,548 WARN [BP-364349617-172.17.0.2-1733295176903 heartbeating to localhost/127.0.0.1:41687 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-364349617-172.17.0.2-1733295176903 (Datanode Uuid cbeeeff1-6991-455d-942c-a8ed47d0b9de) service to localhost/127.0.0.1:41687 2024-12-04T06:53:11,548 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-04T06:53:11,548 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T06:53:11,551 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/cluster_334a1604-d43e-79e1-6c44-751b06f7fb4e/data/data1/current/BP-364349617-172.17.0.2-1733295176903 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:53:11,551 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/cluster_334a1604-d43e-79e1-6c44-751b06f7fb4e/data/data2/current/BP-364349617-172.17.0.2-1733295176903 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:53:11,551 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T06:53:11,559 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T06:53:11,561 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T06:53:11,562 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T06:53:11,562 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T06:53:11,562 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T06:53:11,563 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@12f3a1cd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/hadoop.log.dir/,AVAILABLE} 2024-12-04T06:53:11,563 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7f264700{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T06:53:11,614 WARN [Thread-1326 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-04T06:53:11,616 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd30fbdfec70dcd0d with lease ID 0x992fd9214c62657b: from storage DS-77675f3b-25be-416e-8f7f-41bfc2e1d8be node DatanodeRegistration(127.0.0.1:41657, datanodeUuid=1ca9d8ab-597a-4dac-b687-393278660315, infoPort=34673, infoSecurePort=0, ipcPort=37305, storageInfo=lv=-57;cid=testClusterID;nsid=2126775024;c=1733295176903), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:53:11,616 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd30fbdfec70dcd0d with lease ID 0x992fd9214c62657b: from storage DS-385daa33-8ce4-4883-bea0-86d9ff7ff308 node DatanodeRegistration(127.0.0.1:41657, datanodeUuid=1ca9d8ab-597a-4dac-b687-393278660315, infoPort=34673, infoSecurePort=0, ipcPort=37305, storageInfo=lv=-57;cid=testClusterID;nsid=2126775024;c=1733295176903), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:53:11,685 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@a2ed0f2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/java.io.tmpdir/jetty-localhost-44539-hadoop-hdfs-3_4_1-tests_jar-_-any-17259054791155674526/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:53:11,686 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@540977e5{HTTP/1.1, (http/1.1)}{localhost:44539} 2024-12-04T06:53:11,686 INFO [Time-limited test {}] server.Server(415): Started @164210ms 2024-12-04T06:53:11,687 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-04T06:53:11,798 WARN [Thread-1357 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-04T06:53:11,801 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb7d4032893b175ac with lease ID 0x992fd9214c62657c: from storage DS-92862bca-953a-4443-8da2-bdbfb8aba738 node DatanodeRegistration(127.0.0.1:45579, datanodeUuid=cbeeeff1-6991-455d-942c-a8ed47d0b9de, infoPort=33679, infoSecurePort=0, ipcPort=36075, storageInfo=lv=-57;cid=testClusterID;nsid=2126775024;c=1733295176903), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:53:11,801 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb7d4032893b175ac with lease ID 0x992fd9214c62657c: from storage DS-24ec8f24-3a06-49e6-b44e-4754fcd77d75 node DatanodeRegistration(127.0.0.1:45579, datanodeUuid=cbeeeff1-6991-455d-942c-a8ed47d0b9de, infoPort=33679, infoSecurePort=0, ipcPort=36075, storageInfo=lv=-57;cid=testClusterID;nsid=2126775024;c=1733295176903), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:53:12,076 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:53:12,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:12,715 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-12-04T06:53:12,718 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-12-04T06:53:12,720 ERROR [FSHLog-0-hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b-prefix:607fd5c6574c,46331,1733295177828 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44085,DS-92862bca-953a-4443-8da2-bdbfb8aba738,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T06:53:12,720 WARN [FSHLog-0-hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b-prefix:607fd5c6574c,46331,1733295177828 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44085,DS-92862bca-953a-4443-8da2-bdbfb8aba738,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:53:12,720 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 607fd5c6574c%2C46331%2C1733295177828:(num 1733295178722) roll requested 2024-12-04T06:53:12,721 INFO [regionserver/607fd5c6574c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C46331%2C1733295177828.1733295192720 2024-12-04T06:53:12,726 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295178722 newFile=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295192720 2024-12-04T06:53:12,726 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:12,727 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:12,727 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:12,727 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:12,727 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:12,727 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295178722 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295192720 2024-12-04T06:53:12,727 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44085,DS-92862bca-953a-4443-8da2-bdbfb8aba738,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T06:53:12,727 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44085,DS-92862bca-953a-4443-8da2-bdbfb8aba738,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:53:12,728 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295178722 2024-12-04T06:53:12,728 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34673:34673),(127.0.0.1/127.0.0.1:33679:33679)] 2024-12-04T06:53:12,728 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295178722 is not closed yet, will try archiving it next time 2024-12-04T06:53:12,728 WARN [IPC Server handler 3 on default port 41687 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295178722 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-12-04T06:53:12,728 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295178722 after 0ms 2024-12-04T06:53:13,076 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:13,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:53:14,077 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:14,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:14,732 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-12-04T06:53:15,078 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:53:15,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:15,616 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-04T06:53:16,078 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:16,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:16,729 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295178722 after 4001ms 2024-12-04T06:53:16,735 WARN [ResponseProcessor for block BP-364349617-172.17.0.2-1733295176903:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-364349617-172.17.0.2-1733295176903:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-364349617-172.17.0.2-1733295176903:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:45579,DS-92862bca-953a-4443-8da2-bdbfb8aba738,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:53:16,735 WARN [DataStreamer for file /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295192720 block BP-364349617-172.17.0.2-1733295176903:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-364349617-172.17.0.2-1733295176903:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41657,DS-77675f3b-25be-416e-8f7f-41bfc2e1d8be,DISK], DatanodeInfoWithStorage[127.0.0.1:45579,DS-92862bca-953a-4443-8da2-bdbfb8aba738,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45579,DS-92862bca-953a-4443-8da2-bdbfb8aba738,DISK]) is bad. 
2024-12-04T06:53:16,735 WARN [PacketResponder: BP-364349617-172.17.0.2-1733295176903:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:45579] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:53:16,735 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_53709073_22 at /127.0.0.1:56368 [Receiving block BP-364349617-172.17.0.2-1733295176903:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:41657:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56368 dst: /127.0.0.1:41657 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T06:53:16,736 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_53709073_22 at /127.0.0.1:53772 [Receiving block BP-364349617-172.17.0.2-1733295176903:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:45579:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53772 dst: /127.0.0.1:45579 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T06:53:16,737 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@a2ed0f2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:53:16,737 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@540977e5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T06:53:16,737 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T06:53:16,737 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7f264700{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T06:53:16,738 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@12f3a1cd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/hadoop.log.dir/,STOPPED} 2024-12-04T06:53:16,738 WARN [BP-364349617-172.17.0.2-1733295176903 heartbeating to localhost/127.0.0.1:41687 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T06:53:16,738 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-04T06:53:16,738 WARN [BP-364349617-172.17.0.2-1733295176903 heartbeating to localhost/127.0.0.1:41687 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-364349617-172.17.0.2-1733295176903 (Datanode Uuid cbeeeff1-6991-455d-942c-a8ed47d0b9de) service to localhost/127.0.0.1:41687 2024-12-04T06:53:16,738 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T06:53:16,739 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/cluster_334a1604-d43e-79e1-6c44-751b06f7fb4e/data/data1/current/BP-364349617-172.17.0.2-1733295176903 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:53:16,739 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/cluster_334a1604-d43e-79e1-6c44-751b06f7fb4e/data/data2/current/BP-364349617-172.17.0.2-1733295176903 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:53:16,739 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T06:53:16,749 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T06:53:16,753 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T06:53:16,754 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T06:53:16,754 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T06:53:16,754 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T06:53:16,754 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62802d30{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/hadoop.log.dir/,AVAILABLE} 2024-12-04T06:53:16,754 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2460467f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T06:53:16,870 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@443f9e40{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/java.io.tmpdir/jetty-localhost-46027-hadoop-hdfs-3_4_1-tests_jar-_-any-507054689359465605/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:53:16,870 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6ff7fcbc{HTTP/1.1, (http/1.1)}{localhost:46027} 2024-12-04T06:53:16,870 INFO [Time-limited test {}] server.Server(415): Started @169395ms 2024-12-04T06:53:16,871 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-04T06:53:16,891 WARN [ResponseProcessor for block BP-364349617-172.17.0.2-1733295176903:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-364349617-172.17.0.2-1733295176903:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:53:16,892 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_53709073_22 at /127.0.0.1:56388 [Receiving block BP-364349617-172.17.0.2-1733295176903:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:41657:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56388 dst: /127.0.0.1:41657 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:53:16,906 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2ac8bbe4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:53:16,906 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@64d3fc67{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T06:53:16,906 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T06:53:16,906 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@32ef6c57{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T06:53:16,906 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@27e9dc43{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/hadoop.log.dir/,STOPPED} 2024-12-04T06:53:16,913 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-04T06:53:16,913 WARN [BP-364349617-172.17.0.2-1733295176903 heartbeating to localhost/127.0.0.1:41687 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T06:53:16,913 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T06:53:16,913 WARN [BP-364349617-172.17.0.2-1733295176903 heartbeating to localhost/127.0.0.1:41687 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-364349617-172.17.0.2-1733295176903 (Datanode Uuid 1ca9d8ab-597a-4dac-b687-393278660315) service to localhost/127.0.0.1:41687 2024-12-04T06:53:16,914 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/cluster_334a1604-d43e-79e1-6c44-751b06f7fb4e/data/data3/current/BP-364349617-172.17.0.2-1733295176903 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:53:16,914 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/cluster_334a1604-d43e-79e1-6c44-751b06f7fb4e/data/data4/current/BP-364349617-172.17.0.2-1733295176903 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:53:16,914 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T06:53:16,941 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T06:53:16,944 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T06:53:16,945 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T06:53:16,945 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T06:53:16,945 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-04T06:53:16,947 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6baabd83{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/hadoop.log.dir/,AVAILABLE} 2024-12-04T06:53:16,948 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75ed142f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T06:53:17,012 WARN [Thread-1400 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-04T06:53:17,014 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2c01b17805cf9616 with lease ID 0x992fd9214c62657d: from storage DS-92862bca-953a-4443-8da2-bdbfb8aba738 node DatanodeRegistration(127.0.0.1:34175, datanodeUuid=cbeeeff1-6991-455d-942c-a8ed47d0b9de, infoPort=37587, infoSecurePort=0, ipcPort=43991, storageInfo=lv=-57;cid=testClusterID;nsid=2126775024;c=1733295176903), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:53:17,015 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2c01b17805cf9616 with lease ID 0x992fd9214c62657d: from storage DS-24ec8f24-3a06-49e6-b44e-4754fcd77d75 node DatanodeRegistration(127.0.0.1:34175, datanodeUuid=cbeeeff1-6991-455d-942c-a8ed47d0b9de, infoPort=37587, infoSecurePort=0, ipcPort=43991, storageInfo=lv=-57;cid=testClusterID;nsid=2126775024;c=1733295176903), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:53:17,079 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:53:17,105 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@39f8899d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/java.io.tmpdir/jetty-localhost-37551-hadoop-hdfs-3_4_1-tests_jar-_-any-6629610961339606512/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:53:17,105 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7a441552{HTTP/1.1, (http/1.1)}{localhost:37551} 2024-12-04T06:53:17,105 INFO [Time-limited test {}] server.Server(415): Started @169630ms 2024-12-04T06:53:17,108 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-04T06:53:17,202 WARN [Thread-1431 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-04T06:53:17,204 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x888fc38d017ffbcd with lease ID 0x992fd9214c62657e: from storage DS-77675f3b-25be-416e-8f7f-41bfc2e1d8be node DatanodeRegistration(127.0.0.1:40413, datanodeUuid=1ca9d8ab-597a-4dac-b687-393278660315, infoPort=33951, infoSecurePort=0, ipcPort=37407, storageInfo=lv=-57;cid=testClusterID;nsid=2126775024;c=1733295176903), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:53:17,204 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x888fc38d017ffbcd with lease ID 0x992fd9214c62657e: from storage DS-385daa33-8ce4-4883-bea0-86d9ff7ff308 node DatanodeRegistration(127.0.0.1:40413, datanodeUuid=1ca9d8ab-597a-4dac-b687-393278660315, infoPort=33951, infoSecurePort=0, ipcPort=37407, storageInfo=lv=-57;cid=testClusterID;nsid=2126775024;c=1733295176903), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:53:17,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:18,079 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:53:18,129 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-12-04T06:53:18,132 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-12-04T06:53:18,133 ERROR [FSHLog-0-hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b-prefix:607fd5c6574c,46331,1733295177828 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41657,DS-77675f3b-25be-416e-8f7f-41bfc2e1d8be,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:53:18,134 WARN [FSHLog-0-hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b-prefix:607fd5c6574c,46331,1733295177828 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41657,DS-77675f3b-25be-416e-8f7f-41bfc2e1d8be,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T06:53:18,134 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 607fd5c6574c%2C46331%2C1733295177828:(num 1733295192720) roll requested 2024-12-04T06:53:18,134 INFO [regionserver/607fd5c6574c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C46331%2C1733295177828.1733295198134 2024-12-04T06:53:18,148 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295192720 newFile=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295198134 2024-12-04T06:53:18,149 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:18,149 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:18,149 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:18,149 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:18,149 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:18,149 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295192720 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295198134 2024-12-04T06:53:18,150 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41657,DS-77675f3b-25be-416e-8f7f-41bfc2e1d8be,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:53:18,150 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41657,DS-77675f3b-25be-416e-8f7f-41bfc2e1d8be,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T06:53:18,150 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295192720 2024-12-04T06:53:18,150 WARN [IPC Server handler 4 on default port 41687 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295192720 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-12-04T06:53:18,151 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295192720 after 0ms 2024-12-04T06:53:18,160 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37587:37587),(127.0.0.1/127.0.0.1:33951:33951)] 2024-12-04T06:53:18,160 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295192720 is not closed yet, will try archiving it next time 2024-12-04T06:53:18,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:19,080 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:19,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:20,081 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:20,163 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C46331%2C1733295177828.1733295200163 2024-12-04T06:53:20,169 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295198134 newFile=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295200163 2024-12-04T06:53:20,169 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:20,169 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:20,170 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:20,170 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:20,170 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:20,170 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295198134 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295200163 2024-12-04T06:53:20,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40413 is added to blk_1073741838_1019 (size=1264) 2024-12-04T06:53:20,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34175 is added to blk_1073741838_1019 (size=1264) 2024-12-04T06:53:20,172 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37587:37587),(127.0.0.1/127.0.0.1:33951:33951)] 2024-12-04T06:53:20,172 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295192720 is not closed yet, will try archiving it next time 2024-12-04T06:53:20,172 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295198134 is not closed yet, will try archiving it next time 2024-12-04T06:53:20,173 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295178722 2024-12-04T06:53:20,173 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file 
hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295178722 2024-12-04T06:53:20,173 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295192720 is not closed yet, will try archiving it next time 2024-12-04T06:53:20,173 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295178722 after 0ms 2024-12-04T06:53:20,173 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295178722 2024-12-04T06:53:20,183 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1733295179632/Put/vlen=218/seqid=0] 2024-12-04T06:53:20,183 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1733295189347/Put/vlen=1045/seqid=0] 2024-12-04T06:53:20,183 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295178722 2024-12-04T06:53:20,183 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295192720 2024-12-04T06:53:20,183 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295192720 2024-12-04T06:53:20,184 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295192720 after 1ms 2024-12-04T06:53:20,184 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295192720 2024-12-04T06:53:20,187 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1733295192720/Put/vlen=1045/seqid=0] 2024-12-04T06:53:20,187 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1733295194733/Put/vlen=1045/seqid=0] 2024-12-04T06:53:20,187 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295192720 2024-12-04T06:53:20,187 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295198134 2024-12-04T06:53:20,187 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file 
hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295198134 2024-12-04T06:53:20,188 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295198134 after 1ms 2024-12-04T06:53:20,188 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295198134 2024-12-04T06:53:20,190 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1733295198133/Put/vlen=1045/seqid=0] 2024-12-04T06:53:20,191 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295200163 2024-12-04T06:53:20,191 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295200163 2024-12-04T06:53:20,191 WARN [IPC Server handler 0 on default port 41687 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295200163 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-12-04T06:53:20,191 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295200163 after 0ms 2024-12-04T06:53:20,311 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:21,015 WARN [ResponseProcessor for block BP-364349617-172.17.0.2-1733295176903:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-364349617-172.17.0.2-1733295176903:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:53:21,015 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1921710488_22 at /127.0.0.1:39700 [Receiving block BP-364349617-172.17.0.2-1733295176903:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:34175:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39700 dst: /127.0.0.1:34175 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:34175 remote=/127.0.0.1:39700]. Total timeout mills is 60000, 59153 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:53:21,016 WARN [DataStreamer for file /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295200163 block BP-364349617-172.17.0.2-1733295176903:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-364349617-172.17.0.2-1733295176903:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34175,DS-92862bca-953a-4443-8da2-bdbfb8aba738,DISK], DatanodeInfoWithStorage[127.0.0.1:40413,DS-77675f3b-25be-416e-8f7f-41bfc2e1d8be,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34175,DS-92862bca-953a-4443-8da2-bdbfb8aba738,DISK]) is bad. 2024-12-04T06:53:21,016 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1921710488_22 at /127.0.0.1:55034 [Receiving block BP-364349617-172.17.0.2-1733295176903:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:40413:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55034 dst: /127.0.0.1:40413 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T06:53:21,017 WARN [DataStreamer for file /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295200163 block BP-364349617-172.17.0.2-1733295176903:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-364349617-172.17.0.2-1733295176903:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T06:53:21,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34175 is added to blk_1073741839_1022 (size=85) 2024-12-04T06:53:21,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40413 is added to blk_1073741839_1022 (size=85) 2024-12-04T06:53:21,082 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:21,311 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:22,015 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-12-04T06:53:22,082 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:22,152 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295192720 after 4002ms 2024-12-04T06:53:22,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:23,083 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:23,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:24,083 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:24,192 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295200163 after 4001ms 2024-12-04T06:53:24,192 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295200163 2024-12-04T06:53:24,201 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295200163 2024-12-04T06:53:24,201 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing faaa1947176becac3336274ce5fdea47 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-12-04T06:53:24,201 ERROR [FSHLog-0-hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b-prefix:607fd5c6574c,46331,1733295177828 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-364349617-172.17.0.2-1733295176903:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T06:53:24,202 WARN [FSHLog-0-hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b-prefix:607fd5c6574c,46331,1733295177828 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-364349617-172.17.0.2-1733295176903:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T06:53:24,202 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 607fd5c6574c%2C46331%2C1733295177828:(num 1733295200163) roll requested 2024-12-04T06:53:24,203 INFO [regionserver/607fd5c6574c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C46331%2C1733295177828.1733295204203 2024-12-04T06:53:24,218 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295200163 newFile=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295204203 2024-12-04T06:53:24,218 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:24,218 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:24,218 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:24,219 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:24,219 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:24,219 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295200163 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295204203 2024-12-04T06:53:24,219 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-364349617-172.17.0.2-1733295176903:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:53:24,220 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-364349617-172.17.0.2-1733295176903:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:53:24,220 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295200163 2024-12-04T06:53:24,221 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295200163 after 0ms 2024-12-04T06:53:24,225 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.1733295200163 to hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/oldWALs/607fd5c6574c%2C46331%2C1733295177828.1733295200163 2024-12-04T06:53:24,236 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37587:37587),(127.0.0.1/127.0.0.1:33951:33951)] 2024-12-04T06:53:24,255 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/data/default/TestLogRolling-testLogRollOnPipelineRestart/faaa1947176becac3336274ce5fdea47/.tmp/info/3d8c629ba6ee4dd1a04666622f6993b1 is 1080, key is row1002/info:/1733295189347/Put/seqid=0 2024-12-04T06:53:24,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40413 is added to blk_1073741841_1024 (size=9270) 2024-12-04T06:53:24,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34175 is added to blk_1073741841_1024 (size=9270) 2024-12-04T06:53:24,264 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/data/default/TestLogRolling-testLogRollOnPipelineRestart/faaa1947176becac3336274ce5fdea47/.tmp/info/3d8c629ba6ee4dd1a04666622f6993b1 2024-12-04T06:53:24,271 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/data/default/TestLogRolling-testLogRollOnPipelineRestart/faaa1947176becac3336274ce5fdea47/.tmp/info/3d8c629ba6ee4dd1a04666622f6993b1 as hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/data/default/TestLogRolling-testLogRollOnPipelineRestart/faaa1947176becac3336274ce5fdea47/info/3d8c629ba6ee4dd1a04666622f6993b1 2024-12-04T06:53:24,277 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/data/default/TestLogRolling-testLogRollOnPipelineRestart/faaa1947176becac3336274ce5fdea47/info/3d8c629ba6ee4dd1a04666622f6993b1, entries=4, sequenceid=8, filesize=9.1 K 2024-12-04T06:53:24,279 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for faaa1947176becac3336274ce5fdea47 in 78ms, sequenceid=8, compaction requested=false 2024-12-04T06:53:24,279 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for faaa1947176becac3336274ce5fdea47: 2024-12-04T06:53:24,279 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-12-04T06:53:24,279 ERROR [FSHLog-0-hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b-prefix:607fd5c6574c,46331,1733295177828.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44085,DS-92862bca-953a-4443-8da2-bdbfb8aba738,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:53:24,280 WARN [FSHLog-0-hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b-prefix:607fd5c6574c,46331,1733295177828.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44085,DS-92862bca-953a-4443-8da2-bdbfb8aba738,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T06:53:24,280 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 607fd5c6574c%2C46331%2C1733295177828.meta:.meta(num 1733295179095) roll requested 2024-12-04T06:53:24,280 INFO [regionserver/607fd5c6574c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C46331%2C1733295177828.meta.1733295204280.meta 2024-12-04T06:53:24,289 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:24,289 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:24,289 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:24,290 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:24,290 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:24,290 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.meta.1733295179095.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.meta.1733295204280.meta 2024-12-04T06:53:24,290 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44085,DS-92862bca-953a-4443-8da2-bdbfb8aba738,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T06:53:24,290 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44085,DS-92862bca-953a-4443-8da2-bdbfb8aba738,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T06:53:24,290 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.meta.1733295179095.meta 2024-12-04T06:53:24,291 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33951:33951),(127.0.0.1/127.0.0.1:37587:37587)] 2024-12-04T06:53:24,291 DEBUG [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.meta.1733295179095.meta is not closed yet, will try archiving it next time 2024-12-04T06:53:24,291 WARN [IPC Server handler 3 on default port 41687 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.meta.1733295179095.meta has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741834_1013 2024-12-04T06:53:24,291 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.meta.1733295179095.meta after 1ms 2024-12-04T06:53:24,306 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/data/hbase/meta/1588230740/.tmp/info/883f32440efa443f82e498932dfb74e9 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1733295179266.faaa1947176becac3336274ce5fdea47./info:regioninfo/1733295179637/Put/seqid=0 2024-12-04T06:53:24,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40413 is added to blk_1073741843_1027 (size=7125) 2024-12-04T06:53:24,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34175 is added to blk_1073741843_1027 (size=7125) 2024-12-04T06:53:24,312 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/data/hbase/meta/1588230740/.tmp/info/883f32440efa443f82e498932dfb74e9 2024-12-04T06:53:24,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:24,332 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/data/hbase/meta/1588230740/.tmp/ns/6469dfb1dc0240c9a78b14c60bf13bb6 is 43, key is default/ns:d/1733295179144/Put/seqid=0 2024-12-04T06:53:24,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34175 is added to blk_1073741844_1028 (size=5153) 2024-12-04T06:53:24,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40413 is added to blk_1073741844_1028 (size=5153) 2024-12-04T06:53:24,338 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/data/hbase/meta/1588230740/.tmp/ns/6469dfb1dc0240c9a78b14c60bf13bb6 2024-12-04T06:53:24,366 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/data/hbase/meta/1588230740/.tmp/table/d798d582d26f4f95a02ea230214628ff is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1733295179649/Put/seqid=0 2024-12-04T06:53:24,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34175 is added to blk_1073741845_1029 (size=5438) 2024-12-04T06:53:24,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40413 is added to blk_1073741845_1029 (size=5438) 2024-12-04T06:53:24,375 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 
(bloomFilter=true), to=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/data/hbase/meta/1588230740/.tmp/table/d798d582d26f4f95a02ea230214628ff 2024-12-04T06:53:24,383 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/data/hbase/meta/1588230740/.tmp/info/883f32440efa443f82e498932dfb74e9 as hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/data/hbase/meta/1588230740/info/883f32440efa443f82e498932dfb74e9 2024-12-04T06:53:24,389 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/data/hbase/meta/1588230740/info/883f32440efa443f82e498932dfb74e9, entries=10, sequenceid=11, filesize=7.0 K 2024-12-04T06:53:24,390 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/data/hbase/meta/1588230740/.tmp/ns/6469dfb1dc0240c9a78b14c60bf13bb6 as hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/data/hbase/meta/1588230740/ns/6469dfb1dc0240c9a78b14c60bf13bb6 2024-12-04T06:53:24,397 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/data/hbase/meta/1588230740/ns/6469dfb1dc0240c9a78b14c60bf13bb6, entries=2, sequenceid=11, filesize=5.0 K 2024-12-04T06:53:24,398 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/data/hbase/meta/1588230740/.tmp/table/d798d582d26f4f95a02ea230214628ff as hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/data/hbase/meta/1588230740/table/d798d582d26f4f95a02ea230214628ff 2024-12-04T06:53:24,404 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/data/hbase/meta/1588230740/table/d798d582d26f4f95a02ea230214628ff, entries=2, sequenceid=11, filesize=5.3 K 2024-12-04T06:53:24,405 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 126ms, sequenceid=11, compaction requested=false 2024-12-04T06:53:24,405 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-04T06:53:24,410 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-04T06:53:24,410 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-04T06:53:24,410 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T06:53:24,410 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:53:24,410 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:53:24,410 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-04T06:53:24,410 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-04T06:53:24,410 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=625419975, stopped=false 2024-12-04T06:53:24,410 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=607fd5c6574c,34971,1733295177758 2024-12-04T06:53:24,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34971-0x1017c3f6a970000, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T06:53:24,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46331-0x1017c3f6a970001, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T06:53:24,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34971-0x1017c3f6a970000, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:53:24,412 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-04T06:53:24,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46331-0x1017c3f6a970001, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:53:24,412 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-04T06:53:24,412 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T06:53:24,412 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:53:24,413 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '607fd5c6574c,46331,1733295177828' ***** 2024-12-04T06:53:24,413 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-04T06:53:24,413 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46331-0x1017c3f6a970001, quorum=127.0.0.1:57132, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T06:53:24,413 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34971-0x1017c3f6a970000, quorum=127.0.0.1:57132, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T06:53:24,413 INFO [RS:0;607fd5c6574c:46331 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-04T06:53:24,413 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-04T06:53:24,413 INFO [RS:0;607fd5c6574c:46331 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-04T06:53:24,413 INFO [RS:0;607fd5c6574c:46331 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-04T06:53:24,413 INFO [RS:0;607fd5c6574c:46331 {}] regionserver.HRegionServer(3091): Received CLOSE for faaa1947176becac3336274ce5fdea47 2024-12-04T06:53:24,414 INFO [RS:0;607fd5c6574c:46331 {}] regionserver.HRegionServer(959): stopping server 607fd5c6574c,46331,1733295177828 2024-12-04T06:53:24,414 INFO [RS:0;607fd5c6574c:46331 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T06:53:24,414 INFO [RS:0;607fd5c6574c:46331 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;607fd5c6574c:46331. 
2024-12-04T06:53:24,414 DEBUG [RS:0;607fd5c6574c:46331 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T06:53:24,414 DEBUG [RS:0;607fd5c6574c:46331 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:53:24,414 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing faaa1947176becac3336274ce5fdea47, disabling compactions & flushes 2024-12-04T06:53:24,414 INFO [RS:0;607fd5c6574c:46331 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-04T06:53:24,414 INFO [RS:0;607fd5c6574c:46331 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-04T06:53:24,414 INFO [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733295179266.faaa1947176becac3336274ce5fdea47. 2024-12-04T06:53:24,414 INFO [RS:0;607fd5c6574c:46331 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-04T06:53:24,414 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733295179266.faaa1947176becac3336274ce5fdea47. 2024-12-04T06:53:24,414 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733295179266.faaa1947176becac3336274ce5fdea47. after waiting 0 ms 2024-12-04T06:53:24,414 INFO [RS:0;607fd5c6574c:46331 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-04T06:53:24,414 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733295179266.faaa1947176becac3336274ce5fdea47. 
2024-12-04T06:53:24,414 INFO [RS:0;607fd5c6574c:46331 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-04T06:53:24,414 DEBUG [RS:0;607fd5c6574c:46331 {}] regionserver.HRegionServer(1325): Online Regions={faaa1947176becac3336274ce5fdea47=TestLogRolling-testLogRollOnPipelineRestart,,1733295179266.faaa1947176becac3336274ce5fdea47., 1588230740=hbase:meta,,1.1588230740} 2024-12-04T06:53:24,414 DEBUG [RS:0;607fd5c6574c:46331 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, faaa1947176becac3336274ce5fdea47 2024-12-04T06:53:24,414 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-04T06:53:24,414 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-04T06:53:24,414 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-04T06:53:24,414 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-04T06:53:24,414 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-04T06:53:24,421 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/data/default/TestLogRolling-testLogRollOnPipelineRestart/faaa1947176becac3336274ce5fdea47/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-12-04T06:53:24,421 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-04T06:53:24,421 INFO [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733295179266.faaa1947176becac3336274ce5fdea47. 2024-12-04T06:53:24,421 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for faaa1947176becac3336274ce5fdea47: Waiting for close lock at 1733295204414Running coprocessor pre-close hooks at 1733295204414Disabling compacts and flushes for region at 1733295204414Disabling writes for close at 1733295204414Writing region close event to WAL at 1733295204415 (+1 ms)Running coprocessor post-close hooks at 1733295204421 (+6 ms)Closed at 1733295204421 2024-12-04T06:53:24,422 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-04T06:53:24,422 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733295179266.faaa1947176becac3336274ce5fdea47. 
2024-12-04T06:53:24,422 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-04T06:53:24,422 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733295204414Running coprocessor pre-close hooks at 1733295204414Disabling compacts and flushes for region at 1733295204414Disabling writes for close at 1733295204414Writing region close event to WAL at 1733295204417 (+3 ms)Running coprocessor post-close hooks at 1733295204421 (+4 ms)Closed at 1733295204422 (+1 ms) 2024-12-04T06:53:24,422 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-04T06:53:24,585 INFO [regionserver/607fd5c6574c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-04T06:53:24,585 INFO [regionserver/607fd5c6574c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-04T06:53:24,586 INFO [regionserver/607fd5c6574c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T06:53:24,614 INFO [RS:0;607fd5c6574c:46331 {}] regionserver.HRegionServer(976): stopping server 607fd5c6574c,46331,1733295177828; all regions closed. 2024-12-04T06:53:24,615 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:24,615 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:24,615 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:24,615 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:24,615 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:24,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34175 is added to blk_1073741842_1025 (size=825) 2024-12-04T06:53:24,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40413 is added to blk_1073741842_1025 (size=825) 2024-12-04T06:53:25,084 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:25,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:53:26,085 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:26,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:27,085 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:53:27,206 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1013: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-04T06:53:27,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:27,733 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-04T06:53:28,086 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:53:28,292 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.meta.1733295179095.meta after 4002ms 2024-12-04T06:53:28,292 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/WALs/607fd5c6574c,46331,1733295177828/607fd5c6574c%2C46331%2C1733295177828.meta.1733295179095.meta to hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/oldWALs/607fd5c6574c%2C46331%2C1733295177828.meta.1733295179095.meta 2024-12-04T06:53:28,295 DEBUG [RS:0;607fd5c6574c:46331 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/oldWALs 2024-12-04T06:53:28,295 INFO [RS:0;607fd5c6574c:46331 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 607fd5c6574c%2C46331%2C1733295177828.meta:.meta(num 1733295204280) 2024-12-04T06:53:28,295 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:28,296 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:28,296 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:28,296 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:28,296 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:28,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40413 is added to blk_1073741840_1023 (size=1162) 2024-12-04T06:53:28,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34175 is added to blk_1073741840_1023 (size=1162) 2024-12-04T06:53:28,302 DEBUG [RS:0;607fd5c6574c:46331 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/oldWALs 2024-12-04T06:53:28,302 INFO [RS:0;607fd5c6574c:46331 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 607fd5c6574c%2C46331%2C1733295177828:(num 1733295204203) 2024-12-04T06:53:28,302 DEBUG [RS:0;607fd5c6574c:46331 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:53:28,302 INFO [RS:0;607fd5c6574c:46331 {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T06:53:28,302 INFO [RS:0;607fd5c6574c:46331 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T06:53:28,302 INFO [RS:0;607fd5c6574c:46331 {}] hbase.ChoreService(370): Chore service for: regionserver/607fd5c6574c:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-04T06:53:28,303 INFO [RS:0;607fd5c6574c:46331 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T06:53:28,303 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-04T06:53:28,303 INFO [RS:0;607fd5c6574c:46331 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46331 2024-12-04T06:53:28,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34971-0x1017c3f6a970000, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T06:53:28,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46331-0x1017c3f6a970001, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/607fd5c6574c,46331,1733295177828 2024-12-04T06:53:28,305 INFO [RS:0;607fd5c6574c:46331 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T06:53:28,307 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [607fd5c6574c,46331,1733295177828] 2024-12-04T06:53:28,308 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/607fd5c6574c,46331,1733295177828 already deleted, retry=false 2024-12-04T06:53:28,308 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 607fd5c6574c,46331,1733295177828 expired; onlineServers=0 2024-12-04T06:53:28,308 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '607fd5c6574c,34971,1733295177758' ***** 2024-12-04T06:53:28,308 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-04T06:53:28,308 INFO [M:0;607fd5c6574c:34971 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T06:53:28,308 INFO [M:0;607fd5c6574c:34971 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T06:53:28,308 DEBUG [M:0;607fd5c6574c:34971 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-04T06:53:28,308 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-04T06:53:28,308 DEBUG [M:0;607fd5c6574c:34971 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-04T06:53:28,308 DEBUG [master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.large.0-1733295178484 {}] cleaner.HFileCleaner(306): Exit Thread[master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.large.0-1733295178484,5,FailOnTimeoutGroup] 2024-12-04T06:53:28,308 DEBUG [master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.small.0-1733295178489 {}] cleaner.HFileCleaner(306): Exit Thread[master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.small.0-1733295178489,5,FailOnTimeoutGroup] 2024-12-04T06:53:28,309 INFO [M:0;607fd5c6574c:34971 {}] hbase.ChoreService(370): Chore service for: master/607fd5c6574c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-04T06:53:28,309 INFO [M:0;607fd5c6574c:34971 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T06:53:28,309 DEBUG [M:0;607fd5c6574c:34971 {}] master.HMaster(1795): Stopping service threads 2024-12-04T06:53:28,309 INFO [M:0;607fd5c6574c:34971 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-04T06:53:28,309 INFO [M:0;607fd5c6574c:34971 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-04T06:53:28,309 INFO [M:0;607fd5c6574c:34971 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-04T06:53:28,309 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-04T06:53:28,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34971-0x1017c3f6a970000, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-04T06:53:28,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34971-0x1017c3f6a970000, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:53:28,311 DEBUG [M:0;607fd5c6574c:34971 {}] zookeeper.ZKUtil(347): master:34971-0x1017c3f6a970000, quorum=127.0.0.1:57132, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-04T06:53:28,311 WARN [M:0;607fd5c6574c:34971 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-04T06:53:28,312 INFO [M:0;607fd5c6574c:34971 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/.lastflushedseqids 2024-12-04T06:53:28,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-04T06:53:28,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34175 is added to blk_1073741846_1030 (size=130)
2024-12-04T06:53:28,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40413 is added to blk_1073741846_1030 (size=130)
2024-12-04T06:53:28,317 INFO [M:0;607fd5c6574c:34971 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-12-04T06:53:28,317 INFO [M:0;607fd5c6574c:34971 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-04T06:53:28,318 DEBUG [M:0;607fd5c6574c:34971 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-04T06:53:28,318 INFO [M:0;607fd5c6574c:34971 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-04T06:53:28,318 DEBUG [M:0;607fd5c6574c:34971 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-04T06:53:28,318 DEBUG [M:0;607fd5c6574c:34971 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-04T06:53:28,318 DEBUG [M:0;607fd5c6574c:34971 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-04T06:53:28,318 INFO [M:0;607fd5c6574c:34971 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.16 KB heapSize=29.13 KB
2024-12-04T06:53:28,318 ERROR [FSHLog-0-hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData-prefix:607fd5c6574c,34971,1733295177758 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44085,DS-92862bca-953a-4443-8da2-bdbfb8aba738,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-04T06:53:28,318 WARN [FSHLog-0-hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData-prefix:607fd5c6574c,34971,1733295177758 {}] wal.AbstractFSWAL(2174): append entry failed
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44085,DS-92862bca-953a-4443-8da2-bdbfb8aba738,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-04T06:53:28,318 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 607fd5c6574c%2C34971%2C1733295177758:(num 1733295178385) roll requested
2024-12-04T06:53:28,319 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C34971%2C1733295177758.1733295208318
2024-12-04T06:53:28,323 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-04T06:53:28,323 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-04T06:53:28,323 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-04T06:53:28,323 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-04T06:53:28,323 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-04T06:53:28,324 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/WALs/607fd5c6574c,34971,1733295177758/607fd5c6574c%2C34971%2C1733295177758.1733295178385 with entries=53, filesize=26.61 KB; new WAL /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/WALs/607fd5c6574c,34971,1733295177758/607fd5c6574c%2C34971%2C1733295177758.1733295208318
2024-12-04T06:53:28,324 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44085,DS-92862bca-953a-4443-8da2-bdbfb8aba738,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-04T06:53:28,324 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44085,DS-92862bca-953a-4443-8da2-bdbfb8aba738,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-04T06:53:28,324 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/WALs/607fd5c6574c,34971,1733295177758/607fd5c6574c%2C34971%2C1733295177758.1733295178385
2024-12-04T06:53:28,324 WARN [IPC Server handler 2 on default port 41687 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/WALs/607fd5c6574c,34971,1733295177758/607fd5c6574c%2C34971%2C1733295177758.1733295178385 has not been closed. Lease recovery is in progress.
RecoveryId = 1032 for block blk_1073741830_1015 2024-12-04T06:53:28,325 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/WALs/607fd5c6574c,34971,1733295177758/607fd5c6574c%2C34971%2C1733295177758.1733295178385 after 1ms 2024-12-04T06:53:28,328 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37587:37587),(127.0.0.1/127.0.0.1:33951:33951)] 2024-12-04T06:53:28,328 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/WALs/607fd5c6574c,34971,1733295177758/607fd5c6574c%2C34971%2C1733295177758.1733295178385 is not closed yet, will try archiving it next time 2024-12-04T06:53:28,344 DEBUG [M:0;607fd5c6574c:34971 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d73114804e274d1c8a4d62361536f509 is 82, key is hbase:meta,,1/info:regioninfo/1733295179123/Put/seqid=0 2024-12-04T06:53:28,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34175 is added to blk_1073741848_1033 (size=5672) 2024-12-04T06:53:28,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40413 is added to blk_1073741848_1033 (size=5672) 2024-12-04T06:53:28,349 INFO [M:0;607fd5c6574c:34971 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d73114804e274d1c8a4d62361536f509 2024-12-04T06:53:28,369 DEBUG [M:0;607fd5c6574c:34971 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/524107fee0b64850940dea026956b151 is 777, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733295179654/Put/seqid=0 2024-12-04T06:53:28,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40413 is added to blk_1073741849_1034 (size=6117) 2024-12-04T06:53:28,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34175 is added to blk_1073741849_1034 (size=6117) 2024-12-04T06:53:28,374 INFO [M:0;607fd5c6574c:34971 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.56 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/524107fee0b64850940dea026956b151 2024-12-04T06:53:28,393 DEBUG [M:0;607fd5c6574c:34971 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/00fcc963ef1048ffa4f4c2cd6275f704 is 69, key is 607fd5c6574c,46331,1733295177828/rs:state/1733295178569/Put/seqid=0 2024-12-04T06:53:28,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:40413 is added to blk_1073741850_1035 (size=5156) 2024-12-04T06:53:28,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34175 is added to blk_1073741850_1035 (size=5156) 2024-12-04T06:53:28,398 INFO [M:0;607fd5c6574c:34971 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/00fcc963ef1048ffa4f4c2cd6275f704 2024-12-04T06:53:28,407 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46331-0x1017c3f6a970001, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T06:53:28,407 INFO [RS:0;607fd5c6574c:46331 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-04T06:53:28,407 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46331-0x1017c3f6a970001, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T06:53:28,407 INFO [RS:0;607fd5c6574c:46331 {}] regionserver.HRegionServer(1031): Exiting; stopping=607fd5c6574c,46331,1733295177828; zookeeper connection closed. 2024-12-04T06:53:28,407 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4cc9af3d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4cc9af3d 2024-12-04T06:53:28,407 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-04T06:53:28,417 DEBUG [M:0;607fd5c6574c:34971 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f84958178af64c5cadc41e42d3edcb45 is 52, key is load_balancer_on/state:d/1733295179261/Put/seqid=0 2024-12-04T06:53:28,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34175 is added to blk_1073741851_1036 (size=5056) 2024-12-04T06:53:28,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40413 is added to blk_1073741851_1036 (size=5056) 2024-12-04T06:53:28,422 INFO [M:0;607fd5c6574c:34971 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f84958178af64c5cadc41e42d3edcb45 2024-12-04T06:53:28,427 DEBUG [M:0;607fd5c6574c:34971 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d73114804e274d1c8a4d62361536f509 as hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d73114804e274d1c8a4d62361536f509 2024-12-04T06:53:28,432 INFO [M:0;607fd5c6574c:34971 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d73114804e274d1c8a4d62361536f509, entries=8, 
sequenceid=56, filesize=5.5 K 2024-12-04T06:53:28,433 DEBUG [M:0;607fd5c6574c:34971 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/524107fee0b64850940dea026956b151 as hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/524107fee0b64850940dea026956b151 2024-12-04T06:53:28,437 INFO [M:0;607fd5c6574c:34971 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/524107fee0b64850940dea026956b151, entries=6, sequenceid=56, filesize=6.0 K 2024-12-04T06:53:28,438 DEBUG [M:0;607fd5c6574c:34971 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/00fcc963ef1048ffa4f4c2cd6275f704 as hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/00fcc963ef1048ffa4f4c2cd6275f704 2024-12-04T06:53:28,443 INFO [M:0;607fd5c6574c:34971 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/00fcc963ef1048ffa4f4c2cd6275f704, entries=1, sequenceid=56, filesize=5.0 K 2024-12-04T06:53:28,444 DEBUG [M:0;607fd5c6574c:34971 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f84958178af64c5cadc41e42d3edcb45 as hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f84958178af64c5cadc41e42d3edcb45 2024-12-04T06:53:28,449 INFO [M:0;607fd5c6574c:34971 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f84958178af64c5cadc41e42d3edcb45, entries=1, sequenceid=56, filesize=4.9 K 2024-12-04T06:53:28,451 INFO [M:0;607fd5c6574c:34971 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 133ms, sequenceid=56, compaction requested=false 2024-12-04T06:53:28,452 INFO [M:0;607fd5c6574c:34971 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-04T06:53:28,452 DEBUG [M:0;607fd5c6574c:34971 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733295208318Disabling compacts and flushes for region at 1733295208318Disabling writes for close at 1733295208318Obtaining lock to block concurrent updates at 1733295208318Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733295208318Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23714, getHeapSize=29768, getOffHeapSize=0, getCellsCount=67 at 1733295208318Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733295208329 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733295208329Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733295208343 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733295208343Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733295208354 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733295208369 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733295208369Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733295208379 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733295208393 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733295208393Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733295208402 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733295208416 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733295208417 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@66cf5fb7: reopening flushed file at 1733295208427 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6dc890c5: reopening flushed file at 1733295208432 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5ef0a362: reopening flushed file at 1733295208437 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@c00918a: reopening flushed file at 1733295208444 (+7 ms)Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 133ms, sequenceid=56, compaction requested=false at 1733295208451 (+7 ms)Writing region close event to WAL at 1733295208452 (+1 ms)Closed at 1733295208452 2024-12-04T06:53:28,453 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:28,453 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:28,453 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:28,453 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:28,453 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:53:28,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40413 is added to blk_1073741847_1031 (size=757) 2024-12-04T06:53:28,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34175 is added to blk_1073741847_1031 (size=757) 2024-12-04T06:53:29,087 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:29,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:29,422 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:29,422 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:29,440 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:29,440 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:29,440 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:29,441 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:29,441 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:29,441 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:29,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:29,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:29,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:29,446 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:29,451 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:29,452 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:29,955 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-04T06:53:29,956 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:29,956 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:29,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:29,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:29,978 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:29,979 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:29,979 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:29,979 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:29,979 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:29,980 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:29,984 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:29,985 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:29,985 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:29,988 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:30,087 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:30,206 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-04T06:53:30,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:31,088 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:31,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:32,088 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:32,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-04T06:53:32,325 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/WALs/607fd5c6574c,34971,1733295177758/607fd5c6574c%2C34971%2C1733295177758.1733295178385 after 4001ms
2024-12-04T06:53:32,326 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/WALs/607fd5c6574c,34971,1733295177758/607fd5c6574c%2C34971%2C1733295177758.1733295178385 to hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/oldWALs/607fd5c6574c%2C34971%2C1733295177758.1733295178385
2024-12-04T06:53:32,329 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/MasterData/oldWALs/607fd5c6574c%2C34971%2C1733295177758.1733295178385 to hdfs://localhost:41687/user/jenkins/test-data/afe1dfdd-264a-1838-5424-b51b2d66195b/oldWALs/607fd5c6574c%2C34971%2C1733295177758.1733295178385$masterlocalwal$
2024-12-04T06:53:32,329 INFO [M:0;607fd5c6574c:34971 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-04T06:53:32,329 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-04T06:53:32,329 INFO [M:0;607fd5c6574c:34971 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34971
2024-12-04T06:53:32,329 INFO [M:0;607fd5c6574c:34971 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-04T06:53:32,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34971-0x1017c3f6a970000, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-04T06:53:32,431 INFO [M:0;607fd5c6574c:34971 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-04T06:53:32,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34971-0x1017c3f6a970000, quorum=127.0.0.1:57132, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-04T06:53:32,434 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@39f8899d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-04T06:53:32,434 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7a441552{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-04T06:53:32,434 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-04T06:53:32,434 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75ed142f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-04T06:53:32,434 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6baabd83{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/hadoop.log.dir/,STOPPED}
2024-12-04T06:53:32,435 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-04T06:53:32,435 WARN [BP-364349617-172.17.0.2-1733295176903 heartbeating to localhost/127.0.0.1:41687 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T06:53:32,435 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T06:53:32,435 WARN [BP-364349617-172.17.0.2-1733295176903 heartbeating to localhost/127.0.0.1:41687 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-364349617-172.17.0.2-1733295176903 (Datanode Uuid 1ca9d8ab-597a-4dac-b687-393278660315) service to localhost/127.0.0.1:41687 2024-12-04T06:53:32,436 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/cluster_334a1604-d43e-79e1-6c44-751b06f7fb4e/data/data3/current/BP-364349617-172.17.0.2-1733295176903 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:53:32,436 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/cluster_334a1604-d43e-79e1-6c44-751b06f7fb4e/data/data4/current/BP-364349617-172.17.0.2-1733295176903 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:53:32,437 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T06:53:32,438 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@443f9e40{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:53:32,439 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6ff7fcbc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T06:53:32,439 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T06:53:32,439 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2460467f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T06:53:32,439 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62802d30{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/hadoop.log.dir/,STOPPED} 2024-12-04T06:53:32,440 WARN [BP-364349617-172.17.0.2-1733295176903 heartbeating to localhost/127.0.0.1:41687 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T06:53:32,440 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-04T06:53:32,440 WARN [BP-364349617-172.17.0.2-1733295176903 heartbeating to localhost/127.0.0.1:41687 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-364349617-172.17.0.2-1733295176903 (Datanode Uuid cbeeeff1-6991-455d-942c-a8ed47d0b9de) service to localhost/127.0.0.1:41687 2024-12-04T06:53:32,440 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T06:53:32,441 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/cluster_334a1604-d43e-79e1-6c44-751b06f7fb4e/data/data1/current/BP-364349617-172.17.0.2-1733295176903 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:53:32,441 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/cluster_334a1604-d43e-79e1-6c44-751b06f7fb4e/data/data2/current/BP-364349617-172.17.0.2-1733295176903 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:53:32,441 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T06:53:32,448 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6495f923{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-04T06:53:32,449 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1663c181{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T06:53:32,449 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T06:53:32,449 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@48d7ddcf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T06:53:32,449 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6dbadaea{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/hadoop.log.dir/,STOPPED} 2024-12-04T06:53:32,457 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-04T06:53:32,473 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-04T06:53:32,482 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=181 (was 157) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:41687 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41687 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41687 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41687 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41687 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41687 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41687 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41687 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 448) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=297 (was 285) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=6382 (was 6609) 2024-12-04T06:53:32,489 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=181, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=297, ProcessCount=11, AvailableMemoryMB=6382 2024-12-04T06:53:32,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-04T06:53:32,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/hadoop.log.dir so I do NOT create it in target/test-data/acc62485-1803-af48-b138-179f3dc5d05a 2024-12-04T06:53:32,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7db9e46e-a318-923a-efcf-166fea52acd0/hadoop.tmp.dir so I do NOT create it in target/test-data/acc62485-1803-af48-b138-179f3dc5d05a 2024-12-04T06:53:32,490 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/cluster_46a0e604-073b-7e24-162c-13fc0a4e392a, deleteOnExit=true 2024-12-04T06:53:32,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-04T06:53:32,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/test.cache.data in system properties and HBase conf 2024-12-04T06:53:32,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/hadoop.tmp.dir in system properties and HBase conf 2024-12-04T06:53:32,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/hadoop.log.dir in system properties and HBase conf 2024-12-04T06:53:32,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-04T06:53:32,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-04T06:53:32,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-04T06:53:32,490 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-04T06:53:32,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-04T06:53:32,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-04T06:53:32,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-04T06:53:32,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T06:53:32,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-04T06:53:32,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-04T06:53:32,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T06:53:32,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T06:53:32,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-04T06:53:32,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/nfs.dump.dir in system properties and HBase conf 2024-12-04T06:53:32,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/java.io.tmpdir in system properties and HBase conf 2024-12-04T06:53:32,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T06:53:32,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-04T06:53:32,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-04T06:53:32,504 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-04T06:53:32,577 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T06:53:32,581 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T06:53:32,584 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T06:53:32,584 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T06:53:32,585 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-04T06:53:32,585 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T06:53:32,586 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62e6de45{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/hadoop.log.dir/,AVAILABLE} 2024-12-04T06:53:32,586 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@63f38dfa{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T06:53:32,710 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@701842fe{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/java.io.tmpdir/jetty-localhost-35581-hadoop-hdfs-3_4_1-tests_jar-_-any-4081218318272721719/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-04T06:53:32,711 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@785f0d23{HTTP/1.1, (http/1.1)}{localhost:35581} 2024-12-04T06:53:32,711 INFO [Time-limited test {}] server.Server(415): Started @185236ms 2024-12-04T06:53:32,724 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-04T06:53:32,763 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-04T06:53:32,763 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-04T06:53:32,763 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-04T06:53:32,763 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-04T06:53:32,775 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T06:53:32,777 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T06:53:32,778 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T06:53:32,778 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T06:53:32,778 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T06:53:32,779 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6eebb3dc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/hadoop.log.dir/,AVAILABLE} 2024-12-04T06:53:32,779 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5a296252{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T06:53:32,893 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2ac0122b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/java.io.tmpdir/jetty-localhost-44533-hadoop-hdfs-3_4_1-tests_jar-_-any-540085521080763650/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:53:32,893 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@648e3649{HTTP/1.1, (http/1.1)}{localhost:44533} 2024-12-04T06:53:32,893 INFO [Time-limited test {}] server.Server(415): Started @185418ms 2024-12-04T06:53:32,894 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-04T06:53:32,924 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T06:53:32,927 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T06:53:32,927 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T06:53:32,927 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T06:53:32,927 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-04T06:53:32,928 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46a495b0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/hadoop.log.dir/,AVAILABLE} 2024-12-04T06:53:32,928 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4cf5e3df{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T06:53:32,988 WARN [Thread-1626 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/cluster_46a0e604-073b-7e24-162c-13fc0a4e392a/data/data2/current/BP-1483678462-172.17.0.2-1733295212521/current, will proceed with Du for space computation calculation, 2024-12-04T06:53:32,988 WARN [Thread-1625 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/cluster_46a0e604-073b-7e24-162c-13fc0a4e392a/data/data1/current/BP-1483678462-172.17.0.2-1733295212521/current, will proceed with Du for space computation calculation, 2024-12-04T06:53:33,004 WARN [Thread-1604 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-04T06:53:33,006 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x97684cc945abfa3d with lease ID 0xad029bd5fb9f61d: Processing first storage report for DS-6b4043f0-7660-4872-8992-092e6a1d7b22 from datanode DatanodeRegistration(127.0.0.1:44993, datanodeUuid=97848155-f0a3-45d7-bb2a-8af608a32d39, infoPort=42445, infoSecurePort=0, ipcPort=34729, storageInfo=lv=-57;cid=testClusterID;nsid=382459660;c=1733295212521) 2024-12-04T06:53:33,006 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x97684cc945abfa3d with lease ID 0xad029bd5fb9f61d: from storage DS-6b4043f0-7660-4872-8992-092e6a1d7b22 node DatanodeRegistration(127.0.0.1:44993, datanodeUuid=97848155-f0a3-45d7-bb2a-8af608a32d39, infoPort=42445, infoSecurePort=0, ipcPort=34729, storageInfo=lv=-57;cid=testClusterID;nsid=382459660;c=1733295212521), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:53:33,006 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x97684cc945abfa3d with lease ID 0xad029bd5fb9f61d: Processing first storage report for DS-e98d879d-05c9-4bc3-9aa1-a04b16f1a767 from datanode DatanodeRegistration(127.0.0.1:44993, datanodeUuid=97848155-f0a3-45d7-bb2a-8af608a32d39, infoPort=42445, infoSecurePort=0, ipcPort=34729, storageInfo=lv=-57;cid=testClusterID;nsid=382459660;c=1733295212521) 2024-12-04T06:53:33,006 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x97684cc945abfa3d with lease ID 0xad029bd5fb9f61d: from storage DS-e98d879d-05c9-4bc3-9aa1-a04b16f1a767 node DatanodeRegistration(127.0.0.1:44993, datanodeUuid=97848155-f0a3-45d7-bb2a-8af608a32d39, infoPort=42445, infoSecurePort=0, ipcPort=34729, storageInfo=lv=-57;cid=testClusterID;nsid=382459660;c=1733295212521), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:53:33,047 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@39212263{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/java.io.tmpdir/jetty-localhost-39379-hadoop-hdfs-3_4_1-tests_jar-_-any-14756107014192019078/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:53:33,047 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@10128232{HTTP/1.1, (http/1.1)}{localhost:39379} 2024-12-04T06:53:33,047 INFO [Time-limited test {}] server.Server(415): Started @185572ms 2024-12-04T06:53:33,049 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-04T06:53:33,089 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:33,141 WARN [Thread-1651 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/cluster_46a0e604-073b-7e24-162c-13fc0a4e392a/data/data3/current/BP-1483678462-172.17.0.2-1733295212521/current, will proceed with Du for space computation calculation, 2024-12-04T06:53:33,141 WARN [Thread-1652 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/cluster_46a0e604-073b-7e24-162c-13fc0a4e392a/data/data4/current/BP-1483678462-172.17.0.2-1733295212521/current, will proceed with Du for space computation calculation, 2024-12-04T06:53:33,160 WARN [Thread-1640 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-04T06:53:33,162 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x941fde8ad90cf8b9 with lease ID 0xad029bd5fb9f61e: Processing first storage report for DS-f9a2cbbd-94f2-48dc-a103-90506a6cdd50 from datanode DatanodeRegistration(127.0.0.1:39183, datanodeUuid=03209b56-a62b-4b23-8b43-952e8b483d15, infoPort=45059, infoSecurePort=0, ipcPort=44891, storageInfo=lv=-57;cid=testClusterID;nsid=382459660;c=1733295212521) 2024-12-04T06:53:33,162 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x941fde8ad90cf8b9 with lease ID 0xad029bd5fb9f61e: from storage DS-f9a2cbbd-94f2-48dc-a103-90506a6cdd50 node DatanodeRegistration(127.0.0.1:39183, datanodeUuid=03209b56-a62b-4b23-8b43-952e8b483d15, infoPort=45059, infoSecurePort=0, ipcPort=44891, storageInfo=lv=-57;cid=testClusterID;nsid=382459660;c=1733295212521), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:53:33,162 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x941fde8ad90cf8b9 with lease ID 0xad029bd5fb9f61e: Processing first storage report for DS-0addba62-b1e9-49b1-b16e-6dd5e201fe8d from datanode DatanodeRegistration(127.0.0.1:39183, datanodeUuid=03209b56-a62b-4b23-8b43-952e8b483d15, infoPort=45059, infoSecurePort=0, ipcPort=44891, storageInfo=lv=-57;cid=testClusterID;nsid=382459660;c=1733295212521) 2024-12-04T06:53:33,162 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x941fde8ad90cf8b9 with lease ID 0xad029bd5fb9f61e: from storage DS-0addba62-b1e9-49b1-b16e-6dd5e201fe8d node DatanodeRegistration(127.0.0.1:39183, datanodeUuid=03209b56-a62b-4b23-8b43-952e8b483d15, infoPort=45059, infoSecurePort=0, ipcPort=44891, storageInfo=lv=-57;cid=testClusterID;nsid=382459660;c=1733295212521), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:53:33,171 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a 2024-12-04T06:53:33,173 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/cluster_46a0e604-073b-7e24-162c-13fc0a4e392a/zookeeper_0, clientPort=55528, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/cluster_46a0e604-073b-7e24-162c-13fc0a4e392a/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/cluster_46a0e604-073b-7e24-162c-13fc0a4e392a/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-04T06:53:33,174 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55528 2024-12-04T06:53:33,174 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:53:33,175 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:53:33,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44993 is added to blk_1073741825_1001 (size=7) 2024-12-04T06:53:33,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39183 is added to blk_1073741825_1001 (size=7) 2024-12-04T06:53:33,184 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692 with version=8 2024-12-04T06:53:33,184 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/hbase-staging 2024-12-04T06:53:33,186 INFO [Time-limited test {}] client.ConnectionUtils(128): master/607fd5c6574c:0 server-side Connection retries=45 2024-12-04T06:53:33,186 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T06:53:33,186 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T06:53:33,186 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T06:53:33,186 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T06:53:33,186 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T06:53:33,187 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-04T06:53:33,187 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T06:53:33,187 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39693 2024-12-04T06:53:33,188 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39693 connecting to ZooKeeper ensemble=127.0.0.1:55528 2024-12-04T06:53:33,199 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:396930x0, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T06:53:33,199 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39693-0x1017c3ff5040000 connected 2024-12-04T06:53:33,221 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:53:33,222 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:53:33,224 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39693-0x1017c3ff5040000, quorum=127.0.0.1:55528, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T06:53:33,225 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692, hbase.cluster.distributed=false 2024-12-04T06:53:33,227 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39693-0x1017c3ff5040000, quorum=127.0.0.1:55528, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T06:53:33,227 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39693 2024-12-04T06:53:33,227 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39693 2024-12-04T06:53:33,228 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39693 2024-12-04T06:53:33,228 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39693 2024-12-04T06:53:33,228 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39693 2024-12-04T06:53:33,244 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/607fd5c6574c:0 server-side Connection retries=45 2024-12-04T06:53:33,244 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T06:53:33,244 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T06:53:33,244 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T06:53:33,244 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T06:53:33,244 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T06:53:33,244 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-04T06:53:33,244 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T06:53:33,245 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38689 2024-12-04T06:53:33,246 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38689 connecting to ZooKeeper ensemble=127.0.0.1:55528 2024-12-04T06:53:33,246 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:53:33,248 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:53:33,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:386890x0, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T06:53:33,253 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38689-0x1017c3ff5040001 connected 2024-12-04T06:53:33,253 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38689-0x1017c3ff5040001, quorum=127.0.0.1:55528, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T06:53:33,253 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-04T06:53:33,253 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-04T06:53:33,254 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38689-0x1017c3ff5040001, quorum=127.0.0.1:55528, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-04T06:53:33,255 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38689-0x1017c3ff5040001, quorum=127.0.0.1:55528, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T06:53:33,255 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38689 2024-12-04T06:53:33,255 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38689 2024-12-04T06:53:33,256 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38689 2024-12-04T06:53:33,256 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38689 2024-12-04T06:53:33,256 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38689 2024-12-04T06:53:33,268 DEBUG [M:0;607fd5c6574c:39693 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;607fd5c6574c:39693 2024-12-04T06:53:33,268 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/607fd5c6574c,39693,1733295213186 2024-12-04T06:53:33,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39693-0x1017c3ff5040000, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T06:53:33,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38689-0x1017c3ff5040001, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T06:53:33,272 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39693-0x1017c3ff5040000, quorum=127.0.0.1:55528, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/607fd5c6574c,39693,1733295213186 2024-12-04T06:53:33,274 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39693-0x1017c3ff5040000, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:53:33,274 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38689-0x1017c3ff5040001, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-04T06:53:33,274 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38689-0x1017c3ff5040001, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:53:33,274 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39693-0x1017c3ff5040000, quorum=127.0.0.1:55528, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-04T06:53:33,275 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/607fd5c6574c,39693,1733295213186 from backup master directory 2024-12-04T06:53:33,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39693-0x1017c3ff5040000, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/607fd5c6574c,39693,1733295213186 2024-12-04T06:53:33,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39693-0x1017c3ff5040000, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T06:53:33,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38689-0x1017c3ff5040001, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T06:53:33,276 WARN [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
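[editor's note] The ZKWatcher/ZKUtil entries above repeatedly report "Set watcher on znode that does not yet exist" for paths such as /hbase/running and /hbase/master. The following is a minimal sketch of that pattern using the plain Apache ZooKeeper client API, not HBase's internal ZKWatcher; the connect string and znode path are taken from the log, the class name is made up for illustration.

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

// Sketch: register a watch on a znode that may not exist yet (e.g. /hbase/running).
public class ZNodeWatchSketch {
    public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // Ensemble address as reported in the log above (mini ZooKeeper cluster).
        ZooKeeper zk = new ZooKeeper("127.0.0.1:55528", 30000, event -> {
            if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                connected.countDown();
            }
        });
        connected.await();

        // exists() registers the watch even when the node is absent; the watcher
        // fires with a NodeCreated event once the active master creates the znode.
        Stat stat = zk.exists("/hbase/running", event -> {
            if (event.getType() == Watcher.Event.EventType.NodeCreated) {
                System.out.println("znode created: " + event.getPath());
            }
        });
        System.out.println("/hbase/running " + (stat == null ? "does not exist yet" : "already exists"));
        zk.close();
    }
}
```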
2024-12-04T06:53:33,276 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=607fd5c6574c,39693,1733295213186 2024-12-04T06:53:33,280 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/hbase.id] with ID: c81f10d8-ee5f-447b-85dd-e9dac2665fcc 2024-12-04T06:53:33,280 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/.tmp/hbase.id 2024-12-04T06:53:33,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44993 is added to blk_1073741826_1002 (size=42) 2024-12-04T06:53:33,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39183 is added to blk_1073741826_1002 (size=42) 2024-12-04T06:53:33,286 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/.tmp/hbase.id]:[hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/hbase.id] 2024-12-04T06:53:33,296 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:53:33,296 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-04T06:53:33,297 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
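[editor's note] The FSUtils entries above write the cluster ID to a temporary hbase.id file and then move it to its final location. Below is a minimal sketch of that write-to-temp-then-rename pattern with the Hadoop FileSystem API; the paths are simplified stand-ins for the ones in the log, and this is not the exact FSUtils implementation.

```java
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch: publish a small metadata file atomically by writing a temp file and renaming it.
public class ClusterIdFileSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // fs.defaultFS would point at the mini DFS, e.g. hdfs://localhost:40177 in the log above.
        FileSystem fs = FileSystem.get(conf);

        Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id");
        Path dst = new Path("/user/jenkins/test-data/hbase.id");

        // Write the ID (value taken from the log entry above) to a temporary file first,
        // so readers never observe a partially written hbase.id.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
            out.write("c81f10d8-ee5f-447b-85dd-e9dac2665fcc".getBytes(StandardCharsets.UTF_8));
        }
        // Then move it into place; rename is atomic within an HDFS namespace.
        if (!fs.rename(tmp, dst)) {
            throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
        }
        System.out.println("cluster id file at " + dst);
    }
}
```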
2024-12-04T06:53:33,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39693-0x1017c3ff5040000, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:53:33,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38689-0x1017c3ff5040001, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:53:33,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39183 is added to blk_1073741827_1003 (size=196) 2024-12-04T06:53:33,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44993 is added to blk_1073741827_1003 (size=196) 2024-12-04T06:53:33,306 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T06:53:33,307 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-04T06:53:33,307 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T06:53:33,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39183 is added to blk_1073741828_1004 (size=1189) 2024-12-04T06:53:33,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44993 is added to blk_1073741828_1004 (size=1189) 2024-12-04T06:53:33,317 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/MasterData/data/master/store 2024-12-04T06:53:33,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:33,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44993 is added to blk_1073741829_1005 (size=34) 2024-12-04T06:53:33,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39183 is added to blk_1073741829_1005 (size=34) 2024-12-04T06:53:33,324 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:53:33,324 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-04T06:53:33,324 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:53:33,324 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:53:33,324 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-04T06:53:33,324 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:53:33,324 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
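[Editor's note] The WARN above comes from lease recovery calling isFileClosed reflectively; the real failure ("java.io.IOException: Filesystem closed") is wrapped in an InvocationTargetException because the call goes through Method.invoke. A minimal sketch of that reflective probe and the unwrapping, not HBase's actual RecoverLeaseFSUtils code:

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative: probe isFileClosed(Path) via reflection (some FileSystem
// implementations do not expose it) and unwrap the InvocationTargetException.
final class IsFileClosedProbe {
  static boolean isFileClosed(FileSystem fs, Path p) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, p);
    } catch (NoSuchMethodException e) {
      return false;                       // method not available on this FileSystem
    } catch (InvocationTargetException e) {
      // The real cause (here "Filesystem closed") is wrapped by Method.invoke.
      Throwable cause = e.getCause();
      if (cause instanceof IOException) {
        return false;                     // treat as "not known to be closed"
      }
      throw new RuntimeException(cause);
    } catch (IllegalAccessException e) {
      throw new RuntimeException(e);
    }
  }
}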
2024-12-04T06:53:33,324 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733295213324Disabling compacts and flushes for region at 1733295213324Disabling writes for close at 1733295213324Writing region close event to WAL at 1733295213324Closed at 1733295213324 2024-12-04T06:53:33,325 WARN [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/MasterData/data/master/store/.initializing 2024-12-04T06:53:33,325 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/MasterData/WALs/607fd5c6574c,39693,1733295213186 2024-12-04T06:53:33,328 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=607fd5c6574c%2C39693%2C1733295213186, suffix=, logDir=hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/MasterData/WALs/607fd5c6574c,39693,1733295213186, archiveDir=hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/MasterData/oldWALs, maxLogs=10 2024-12-04T06:53:33,328 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C39693%2C1733295213186.1733295213328 2024-12-04T06:53:33,332 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/MasterData/WALs/607fd5c6574c,39693,1733295213186/607fd5c6574c%2C39693%2C1733295213186.1733295213328 2024-12-04T06:53:33,335 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42445:42445),(127.0.0.1/127.0.0.1:45059:45059)] 2024-12-04T06:53:33,336 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-04T06:53:33,336 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:53:33,336 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:53:33,336 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:53:33,338 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:53:33,340 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-04T06:53:33,340 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:53:33,340 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:53:33,340 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:53:33,341 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-04T06:53:33,341 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:53:33,342 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T06:53:33,342 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:53:33,343 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-04T06:53:33,343 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:53:33,343 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T06:53:33,343 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:53:33,344 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-04T06:53:33,345 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:53:33,345 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T06:53:33,345 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:53:33,346 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:53:33,346 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:53:33,347 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:53:33,347 DEBUG [master/607fd5c6574c:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:53:33,348 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-04T06:53:33,349 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:53:33,351 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T06:53:33,351 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=721523, jitterRate=-0.08253639936447144}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-04T06:53:33,352 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733295213336Initializing all the Stores at 1733295213337 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295213337Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295213338 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295213338Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295213338Cleaning up temporary data from old regions at 1733295213347 (+9 ms)Region opened successfully at 1733295213352 (+5 ms) 2024-12-04T06:53:33,352 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-04T06:53:33,355 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@216e0b63, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=607fd5c6574c/172.17.0.2:0 2024-12-04T06:53:33,356 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-04T06:53:33,356 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-04T06:53:33,356 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-04T06:53:33,356 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-04T06:53:33,357 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-04T06:53:33,357 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-04T06:53:33,357 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-04T06:53:33,359 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-04T06:53:33,360 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39693-0x1017c3ff5040000, quorum=127.0.0.1:55528, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-04T06:53:33,362 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-04T06:53:33,362 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-04T06:53:33,363 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39693-0x1017c3ff5040000, quorum=127.0.0.1:55528, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-04T06:53:33,364 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-04T06:53:33,364 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-04T06:53:33,365 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39693-0x1017c3ff5040000, quorum=127.0.0.1:55528, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-04T06:53:33,367 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-04T06:53:33,367 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39693-0x1017c3ff5040000, quorum=127.0.0.1:55528, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-04T06:53:33,369 DEBUG 
[master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-04T06:53:33,370 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39693-0x1017c3ff5040000, quorum=127.0.0.1:55528, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-04T06:53:33,372 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-04T06:53:33,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39693-0x1017c3ff5040000, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T06:53:33,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39693-0x1017c3ff5040000, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:53:33,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38689-0x1017c3ff5040001, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T06:53:33,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38689-0x1017c3ff5040001, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:53:33,375 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=607fd5c6574c,39693,1733295213186, sessionid=0x1017c3ff5040000, setting cluster-up flag (Was=false) 2024-12-04T06:53:33,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39693-0x1017c3ff5040000, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:53:33,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38689-0x1017c3ff5040001, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:53:33,384 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-04T06:53:33,385 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=607fd5c6574c,39693,1733295213186 2024-12-04T06:53:33,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39693-0x1017c3ff5040000, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:53:33,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38689-0x1017c3ff5040001, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:53:33,394 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-04T06:53:33,395 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=607fd5c6574c,39693,1733295213186 2024-12-04T06:53:33,396 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-04T06:53:33,398 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-04T06:53:33,398 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-04T06:53:33,398 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-04T06:53:33,398 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 607fd5c6574c,39693,1733295213186 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-04T06:53:33,399 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/607fd5c6574c:0, corePoolSize=5, maxPoolSize=5 2024-12-04T06:53:33,399 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/607fd5c6574c:0, corePoolSize=5, maxPoolSize=5 2024-12-04T06:53:33,399 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/607fd5c6574c:0, corePoolSize=5, maxPoolSize=5 2024-12-04T06:53:33,399 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/607fd5c6574c:0, corePoolSize=5, maxPoolSize=5 2024-12-04T06:53:33,399 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/607fd5c6574c:0, corePoolSize=10, maxPoolSize=10 2024-12-04T06:53:33,399 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:53:33,399 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/607fd5c6574c:0, corePoolSize=2, maxPoolSize=2 2024-12-04T06:53:33,399 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/607fd5c6574c:0, corePoolSize=1, 
maxPoolSize=1 2024-12-04T06:53:33,400 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733295243400 2024-12-04T06:53:33,400 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-04T06:53:33,400 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-04T06:53:33,400 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-04T06:53:33,400 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-04T06:53:33,400 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-04T06:53:33,400 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-04T06:53:33,401 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-04T06:53:33,401 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-04T06:53:33,401 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-04T06:53:33,401 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-04T06:53:33,401 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T06:53:33,401 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-04T06:53:33,401 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-04T06:53:33,401 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-04T06:53:33,402 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.large.0-1733295213402,5,FailOnTimeoutGroup] 2024-12-04T06:53:33,402 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.small.0-1733295213402,5,FailOnTimeoutGroup] 2024-12-04T06:53:33,402 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-04T06:53:33,402 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-04T06:53:33,402 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:53:33,402 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-04T06:53:33,402 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-04T06:53:33,402 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-04T06:53:33,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44993 is added to blk_1073741831_1007 (size=1321) 2024-12-04T06:53:33,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39183 is added to blk_1073741831_1007 (size=1321) 2024-12-04T06:53:33,410 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-04T06:53:33,410 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692 2024-12-04T06:53:33,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39183 is added to blk_1073741832_1008 (size=32) 2024-12-04T06:53:33,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44993 is added to blk_1073741832_1008 (size=32) 2024-12-04T06:53:33,417 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:53:33,418 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T06:53:33,419 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T06:53:33,419 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:53:33,419 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:53:33,419 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-04T06:53:33,420 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-04T06:53:33,420 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:53:33,421 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:53:33,421 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T06:53:33,422 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T06:53:33,422 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:53:33,422 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:53:33,423 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T06:53:33,424 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-04T06:53:33,424 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:53:33,425 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:53:33,425 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-04T06:53:33,425 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/hbase/meta/1588230740 2024-12-04T06:53:33,426 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/hbase/meta/1588230740 2024-12-04T06:53:33,427 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-04T06:53:33,427 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-04T06:53:33,427 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
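[Editor's note] The FlushLargeStoresPolicy DEBUG lines above report the fallback used when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on the table: the per-family lower bound becomes the region memstore flush size divided by the number of column families. A small arithmetic sketch consistent with the numbers logged for master:store (32 MB) and hbase:meta (16 MB); the class is illustrative only:

// Illustrative arithmetic for the fallback logged above.
final class FlushLowerBoundSketch {
  static long defaultLowerBound(long memstoreFlushSizeBytes, int numColumnFamilies) {
    return memstoreFlushSizeBytes / numColumnFamilies;
  }

  public static void main(String[] args) {
    // master:store has 4 families (info, proc, rs, state) and flushSize=134217728 (128 MB):
    System.out.println(defaultLowerBound(134217728L, 4));  // 33554432 = 32 MB, as logged
    // hbase:meta has 4 families (info, ns, rep_barrier, table); the logged 16 MB bound
    // implies a 64 MB flush size for the meta region:
    System.out.println(defaultLowerBound(67108864L, 4));   // 16777216 = 16 MB, as logged
  }
}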
2024-12-04T06:53:33,428 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-04T06:53:33,430 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T06:53:33,430 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=879535, jitterRate=0.11838695406913757}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T06:53:33,431 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733295213417Initializing all the Stores at 1733295213417Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295213417Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295213418 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295213418Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295213418Cleaning up temporary data from old regions at 1733295213427 (+9 ms)Region opened successfully at 1733295213431 (+4 ms) 2024-12-04T06:53:33,431 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-04T06:53:33,431 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-04T06:53:33,431 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-04T06:53:33,431 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-04T06:53:33,431 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-04T06:53:33,431 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-04T06:53:33,431 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733295213431Disabling compacts and flushes for region at 1733295213431Disabling writes for close at 1733295213431Writing region close 
event to WAL at 1733295213431Closed at 1733295213431 2024-12-04T06:53:33,432 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T06:53:33,432 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-04T06:53:33,433 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-04T06:53:33,434 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-04T06:53:33,435 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-04T06:53:33,457 INFO [RS:0;607fd5c6574c:38689 {}] regionserver.HRegionServer(746): ClusterId : c81f10d8-ee5f-447b-85dd-e9dac2665fcc 2024-12-04T06:53:33,457 DEBUG [RS:0;607fd5c6574c:38689 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-04T06:53:33,461 DEBUG [RS:0;607fd5c6574c:38689 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-04T06:53:33,461 DEBUG [RS:0;607fd5c6574c:38689 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-04T06:53:33,463 DEBUG [RS:0;607fd5c6574c:38689 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-04T06:53:33,463 DEBUG [RS:0;607fd5c6574c:38689 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b6aff89, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=607fd5c6574c/172.17.0.2:0 2024-12-04T06:53:33,475 DEBUG [RS:0;607fd5c6574c:38689 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;607fd5c6574c:38689 2024-12-04T06:53:33,475 INFO [RS:0;607fd5c6574c:38689 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-04T06:53:33,475 INFO [RS:0;607fd5c6574c:38689 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-04T06:53:33,475 DEBUG [RS:0;607fd5c6574c:38689 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-04T06:53:33,476 INFO [RS:0;607fd5c6574c:38689 {}] regionserver.HRegionServer(2659): reportForDuty to master=607fd5c6574c,39693,1733295213186 with port=38689, startcode=1733295213243 2024-12-04T06:53:33,476 DEBUG [RS:0;607fd5c6574c:38689 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-04T06:53:33,478 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41217, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-04T06:53:33,478 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39693 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 607fd5c6574c,38689,1733295213243 2024-12-04T06:53:33,479 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39693 {}] master.ServerManager(517): Registering regionserver=607fd5c6574c,38689,1733295213243 2024-12-04T06:53:33,480 DEBUG [RS:0;607fd5c6574c:38689 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692 2024-12-04T06:53:33,480 DEBUG [RS:0;607fd5c6574c:38689 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40177 2024-12-04T06:53:33,480 DEBUG [RS:0;607fd5c6574c:38689 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-04T06:53:33,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39693-0x1017c3ff5040000, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T06:53:33,482 DEBUG [RS:0;607fd5c6574c:38689 {}] zookeeper.ZKUtil(111): regionserver:38689-0x1017c3ff5040001, quorum=127.0.0.1:55528, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/607fd5c6574c,38689,1733295213243 2024-12-04T06:53:33,482 WARN [RS:0;607fd5c6574c:38689 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-04T06:53:33,482 INFO [RS:0;607fd5c6574c:38689 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T06:53:33,483 DEBUG [RS:0;607fd5c6574c:38689 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/WALs/607fd5c6574c,38689,1733295213243 2024-12-04T06:53:33,483 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [607fd5c6574c,38689,1733295213243] 2024-12-04T06:53:33,486 INFO [RS:0;607fd5c6574c:38689 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-04T06:53:33,488 INFO [RS:0;607fd5c6574c:38689 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-04T06:53:33,488 INFO [RS:0;607fd5c6574c:38689 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-04T06:53:33,488 INFO [RS:0;607fd5c6574c:38689 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
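[Editor's note] The RegionServerTracker/ZKUtil lines above show the regionserver registering itself under /hbase/rs as an ephemeral znode and the master watching that subtree. A minimal sketch of that pattern with the plain ZooKeeper client; the quorum string and server name are taken from the log, everything else (class name, error handling, assuming /hbase/rs already exists) is illustrative:

import java.util.List;

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

// Illustrative only: register a server as an ephemeral child of /hbase/rs and
// list the children with a watch, the pattern behind the tracker messages above.
final class RsEphemeralNodeSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:55528", 30000,
        event -> System.out.println("ZK event: " + event.getType() + " " + event.getPath()));
    String znode = "/hbase/rs/607fd5c6574c,38689,1733295213243";
    // Ephemeral: the node disappears when the session (and thus the server) goes away.
    zk.create(znode, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    List<String> liveServers = zk.getChildren("/hbase/rs", true);  // true = set a watch
    System.out.println("live regionservers: " + liveServers);
    zk.close();
  }
}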
2024-12-04T06:53:33,489 INFO [RS:0;607fd5c6574c:38689 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-04T06:53:33,489 INFO [RS:0;607fd5c6574c:38689 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-04T06:53:33,489 INFO [RS:0;607fd5c6574c:38689 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-04T06:53:33,489 DEBUG [RS:0;607fd5c6574c:38689 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:53:33,489 DEBUG [RS:0;607fd5c6574c:38689 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:53:33,489 DEBUG [RS:0;607fd5c6574c:38689 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:53:33,490 DEBUG [RS:0;607fd5c6574c:38689 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:53:33,490 DEBUG [RS:0;607fd5c6574c:38689 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:53:33,490 DEBUG [RS:0;607fd5c6574c:38689 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/607fd5c6574c:0, corePoolSize=2, maxPoolSize=2 2024-12-04T06:53:33,490 DEBUG [RS:0;607fd5c6574c:38689 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:53:33,490 DEBUG [RS:0;607fd5c6574c:38689 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:53:33,490 DEBUG [RS:0;607fd5c6574c:38689 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:53:33,490 DEBUG [RS:0;607fd5c6574c:38689 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:53:33,490 DEBUG [RS:0;607fd5c6574c:38689 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:53:33,490 DEBUG [RS:0;607fd5c6574c:38689 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:53:33,490 DEBUG [RS:0;607fd5c6574c:38689 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/607fd5c6574c:0, corePoolSize=3, maxPoolSize=3 2024-12-04T06:53:33,490 DEBUG [RS:0;607fd5c6574c:38689 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0, corePoolSize=3, maxPoolSize=3 2024-12-04T06:53:33,491 INFO [RS:0;607fd5c6574c:38689 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
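[Editor's note] The ExecutorService and ScheduledChore lines above each describe a bounded thread pool (corePoolSize/maxPoolSize) or a task run at a fixed period in milliseconds. A rough analogy in plain java.util.concurrent terms, not HBase's own ExecutorService/ChoreService classes; pool sizes and the period are copied from the log, the rest is illustrative:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Illustrative analogy for the pool and chore configuration reported above.
final class PoolAndChoreSketch {
  public static void main(String[] args) throws Exception {
    // e.g. RS_OPEN_REGION: corePoolSize=1, maxPoolSize=1
    ExecutorService openRegionPool = Executors.newFixedThreadPool(1);

    // e.g. CompactionChecker: period=1000 ms
    ScheduledExecutorService chores = Executors.newSingleThreadScheduledExecutor();
    chores.scheduleAtFixedRate(
        () -> System.out.println("compaction check tick"), 1000, 1000, TimeUnit.MILLISECONDS);

    Thread.sleep(3000);        // let the chore tick a few times in this sketch
    chores.shutdownNow();
    openRegionPool.shutdown();
  }
}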
2024-12-04T06:53:33,491 INFO [RS:0;607fd5c6574c:38689 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T06:53:33,491 INFO [RS:0;607fd5c6574c:38689 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T06:53:33,491 INFO [RS:0;607fd5c6574c:38689 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-04T06:53:33,491 INFO [RS:0;607fd5c6574c:38689 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-04T06:53:33,491 INFO [RS:0;607fd5c6574c:38689 {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,38689,1733295213243-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T06:53:33,506 INFO [RS:0;607fd5c6574c:38689 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-04T06:53:33,506 INFO [RS:0;607fd5c6574c:38689 {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,38689,1733295213243-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T06:53:33,507 INFO [RS:0;607fd5c6574c:38689 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T06:53:33,507 INFO [RS:0;607fd5c6574c:38689 {}] regionserver.Replication(171): 607fd5c6574c,38689,1733295213243 started 2024-12-04T06:53:33,520 INFO [RS:0;607fd5c6574c:38689 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T06:53:33,521 INFO [RS:0;607fd5c6574c:38689 {}] regionserver.HRegionServer(1482): Serving as 607fd5c6574c,38689,1733295213243, RpcServer on 607fd5c6574c/172.17.0.2:38689, sessionid=0x1017c3ff5040001 2024-12-04T06:53:33,521 DEBUG [RS:0;607fd5c6574c:38689 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-04T06:53:33,521 DEBUG [RS:0;607fd5c6574c:38689 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 607fd5c6574c,38689,1733295213243 2024-12-04T06:53:33,521 DEBUG [RS:0;607fd5c6574c:38689 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '607fd5c6574c,38689,1733295213243' 2024-12-04T06:53:33,521 DEBUG [RS:0;607fd5c6574c:38689 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-04T06:53:33,521 DEBUG [RS:0;607fd5c6574c:38689 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-04T06:53:33,522 DEBUG [RS:0;607fd5c6574c:38689 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-04T06:53:33,522 DEBUG [RS:0;607fd5c6574c:38689 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-04T06:53:33,522 DEBUG [RS:0;607fd5c6574c:38689 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 607fd5c6574c,38689,1733295213243 2024-12-04T06:53:33,522 DEBUG [RS:0;607fd5c6574c:38689 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '607fd5c6574c,38689,1733295213243' 2024-12-04T06:53:33,522 DEBUG [RS:0;607fd5c6574c:38689 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-04T06:53:33,522 DEBUG 
[RS:0;607fd5c6574c:38689 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-04T06:53:33,523 DEBUG [RS:0;607fd5c6574c:38689 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-04T06:53:33,523 INFO [RS:0;607fd5c6574c:38689 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-04T06:53:33,523 INFO [RS:0;607fd5c6574c:38689 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-04T06:53:33,585 WARN [607fd5c6574c:39693 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-04T06:53:33,625 INFO [RS:0;607fd5c6574c:38689 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=607fd5c6574c%2C38689%2C1733295213243, suffix=, logDir=hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/WALs/607fd5c6574c,38689,1733295213243, archiveDir=hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/oldWALs, maxLogs=32 2024-12-04T06:53:33,625 INFO [RS:0;607fd5c6574c:38689 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C38689%2C1733295213243.1733295213625 2024-12-04T06:53:33,631 INFO [RS:0;607fd5c6574c:38689 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/WALs/607fd5c6574c,38689,1733295213243/607fd5c6574c%2C38689%2C1733295213243.1733295213625 2024-12-04T06:53:33,632 DEBUG [RS:0;607fd5c6574c:38689 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45059:45059),(127.0.0.1/127.0.0.1:42445:42445)] 2024-12-04T06:53:33,836 DEBUG [607fd5c6574c:39693 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-04T06:53:33,836 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=607fd5c6574c,38689,1733295213243 2024-12-04T06:53:33,837 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 607fd5c6574c,38689,1733295213243, state=OPENING 2024-12-04T06:53:33,839 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-04T06:53:33,842 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39693-0x1017c3ff5040000, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:53:33,842 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38689-0x1017c3ff5040001, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:53:33,843 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-04T06:53:33,843 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T06:53:33,843 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T06:53:33,843 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=607fd5c6574c,38689,1733295213243}] 2024-12-04T06:53:33,996 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-04T06:53:33,998 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60285, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-04T06:53:34,001 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-04T06:53:34,001 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T06:53:34,003 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=607fd5c6574c%2C38689%2C1733295213243.meta, suffix=.meta, logDir=hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/WALs/607fd5c6574c,38689,1733295213243, archiveDir=hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/oldWALs, maxLogs=32 2024-12-04T06:53:34,003 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C38689%2C1733295213243.meta.1733295214003.meta 2024-12-04T06:53:34,008 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/WALs/607fd5c6574c,38689,1733295213243/607fd5c6574c%2C38689%2C1733295213243.meta.1733295214003.meta 2024-12-04T06:53:34,010 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45059:45059),(127.0.0.1/127.0.0.1:42445:42445)] 2024-12-04T06:53:34,011 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-04T06:53:34,011 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-04T06:53:34,011 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-04T06:53:34,011 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-04T06:53:34,011 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-04T06:53:34,011 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:53:34,011 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-04T06:53:34,011 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-04T06:53:34,013 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T06:53:34,013 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T06:53:34,013 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:53:34,014 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:53:34,014 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-04T06:53:34,015 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-04T06:53:34,015 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:53:34,015 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:53:34,015 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T06:53:34,016 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T06:53:34,016 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:53:34,016 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:53:34,016 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T06:53:34,017 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-04T06:53:34,017 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:53:34,017 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-04T06:53:34,017 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-04T06:53:34,018 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/hbase/meta/1588230740 2024-12-04T06:53:34,019 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/hbase/meta/1588230740 2024-12-04T06:53:34,020 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-04T06:53:34,020 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-04T06:53:34,020 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-04T06:53:34,021 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-04T06:53:34,022 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=733506, jitterRate=-0.06730014085769653}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T06:53:34,022 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-04T06:53:34,023 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733295214012Writing region info on filesystem at 1733295214012Initializing all the Stores at 1733295214012Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295214012Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295214012Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295214012Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295214012Cleaning up temporary data from old regions at 1733295214020 (+8 ms)Running coprocessor post-open hooks at 1733295214022 (+2 ms)Region opened successfully at 1733295214023 (+1 ms) 2024-12-04T06:53:34,024 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733295213996 2024-12-04T06:53:34,026 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-04T06:53:34,026 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-04T06:53:34,027 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=607fd5c6574c,38689,1733295213243 2024-12-04T06:53:34,028 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 607fd5c6574c,38689,1733295213243, state=OPEN 2024-12-04T06:53:34,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39693-0x1017c3ff5040000, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T06:53:34,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38689-0x1017c3ff5040001, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T06:53:34,035 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T06:53:34,035 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=607fd5c6574c,38689,1733295213243 2024-12-04T06:53:34,035 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T06:53:34,038 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-04T06:53:34,038 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=607fd5c6574c,38689,1733295213243 in 192 msec 2024-12-04T06:53:34,041 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-04T06:53:34,041 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 605 msec 2024-12-04T06:53:34,041 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T06:53:34,041 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-04T06:53:34,043 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-04T06:53:34,043 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=607fd5c6574c,38689,1733295213243, seqNum=-1] 2024-12-04T06:53:34,043 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T06:53:34,045 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43639, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T06:53:34,051 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 652 msec 2024-12-04T06:53:34,051 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733295214051, completionTime=-1 2024-12-04T06:53:34,051 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-04T06:53:34,051 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-04T06:53:34,053 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-04T06:53:34,053 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733295274053 2024-12-04T06:53:34,053 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733295334053 2024-12-04T06:53:34,053 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-04T06:53:34,053 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,39693,1733295213186-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T06:53:34,053 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,39693,1733295213186-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T06:53:34,053 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,39693,1733295213186-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T06:53:34,053 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-607fd5c6574c:39693, period=300000, unit=MILLISECONDS is enabled. 
2024-12-04T06:53:34,053 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-04T06:53:34,053 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-04T06:53:34,055 DEBUG [master/607fd5c6574c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-04T06:53:34,056 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.780sec 2024-12-04T06:53:34,056 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-04T06:53:34,056 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-04T06:53:34,056 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-04T06:53:34,056 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-04T06:53:34,056 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-04T06:53:34,056 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,39693,1733295213186-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T06:53:34,056 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,39693,1733295213186-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-04T06:53:34,058 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a2777cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T06:53:34,058 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 607fd5c6574c,39693,-1 for getting cluster id 2024-12-04T06:53:34,058 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-04T06:53:34,059 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-04T06:53:34,059 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-04T06:53:34,059 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,39693,1733295213186-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-04T06:53:34,060 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c81f10d8-ee5f-447b-85dd-e9dac2665fcc' 2024-12-04T06:53:34,060 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-04T06:53:34,060 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c81f10d8-ee5f-447b-85dd-e9dac2665fcc" 2024-12-04T06:53:34,060 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@671e408b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T06:53:34,060 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [607fd5c6574c,39693,-1] 2024-12-04T06:53:34,061 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-04T06:53:34,061 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:53:34,062 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53714, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-04T06:53:34,063 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@555a4a92, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T06:53:34,063 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-04T06:53:34,064 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=607fd5c6574c,38689,1733295213243, seqNum=-1] 2024-12-04T06:53:34,064 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T06:53:34,065 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34754, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T06:53:34,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=607fd5c6574c,39693,1733295213186 2024-12-04T06:53:34,067 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:53:34,069 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-04T06:53:34,070 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-04T06:53:34,070 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 607fd5c6574c,39693,1733295213186 2024-12-04T06:53:34,070 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@14ddc9e8 2024-12-04T06:53:34,070 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-04T06:53:34,071 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53720, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-04T06:53:34,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39693 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-04T06:53:34,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39693 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-12-04T06:53:34,072 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39693 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T06:53:34,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39693 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-04T06:53:34,075 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-04T06:53:34,075 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:53:34,075 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39693 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-12-04T06:53:34,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39693 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-04T06:53:34,076 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-04T06:53:34,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44993 is added to blk_1073741835_1011 (size=405) 2024-12-04T06:53:34,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39183 is added to blk_1073741835_1011 (size=405) 2024-12-04T06:53:34,085 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 
3c138aef98ea77ba30e0a6fb902c0f4d, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692
2024-12-04T06:53:34,089 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-04T06:53:34,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44993 is added to blk_1073741836_1012 (size=88)
2024-12-04T06:53:34,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39183 is added to blk_1073741836_1012 (size=88)
2024-12-04T06:53:34,091 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-04T06:53:34,091 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 3c138aef98ea77ba30e0a6fb902c0f4d, disabling compactions & flushes
2024-12-04T06:53:34,091 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d.
2024-12-04T06:53:34,091 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d.
2024-12-04T06:53:34,091 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d. after waiting 0 ms
2024-12-04T06:53:34,091 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d.
2024-12-04T06:53:34,091 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d.
2024-12-04T06:53:34,091 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 3c138aef98ea77ba30e0a6fb902c0f4d: Waiting for close lock at 1733295214091Disabling compacts and flushes for region at 1733295214091Disabling writes for close at 1733295214091Writing region close event to WAL at 1733295214091Closed at 1733295214091
2024-12-04T06:53:34,092 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META
2024-12-04T06:53:34,093 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1733295214092"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733295214092"}]},"ts":"1733295214092"}
2024-12-04T06:53:34,095 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta.
2024-12-04T06:53:34,096 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-04T06:53:34,096 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733295214096"}]},"ts":"1733295214096"}
2024-12-04T06:53:34,099 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta
2024-12-04T06:53:34,099 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=3c138aef98ea77ba30e0a6fb902c0f4d, ASSIGN}]
2024-12-04T06:53:34,100 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=3c138aef98ea77ba30e0a6fb902c0f4d, ASSIGN
2024-12-04T06:53:34,101 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=3c138aef98ea77ba30e0a6fb902c0f4d, ASSIGN; state=OFFLINE, location=607fd5c6574c,38689,1733295213243; forceNewPlan=false, retain=false
2024-12-04T06:53:34,252 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3c138aef98ea77ba30e0a6fb902c0f4d, regionState=OPENING, regionLocation=607fd5c6574c,38689,1733295213243
2024-12-04T06:53:34,255 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=3c138aef98ea77ba30e0a6fb902c0f4d, ASSIGN because future has completed
2024-12-04T06:53:34,255 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3c138aef98ea77ba30e0a6fb902c0f4d, server=607fd5c6574c,38689,1733295213243}]
2024-12-04T06:53:34,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-04T06:53:34,412 INFO [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d.
2024-12-04T06:53:34,412 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 3c138aef98ea77ba30e0a6fb902c0f4d, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d.', STARTKEY => '', ENDKEY => ''}
2024-12-04T06:53:34,412 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 3c138aef98ea77ba30e0a6fb902c0f4d
2024-12-04T06:53:34,412 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-04T06:53:34,412 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 3c138aef98ea77ba30e0a6fb902c0f4d
2024-12-04T06:53:34,412 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 3c138aef98ea77ba30e0a6fb902c0f4d
2024-12-04T06:53:34,414 INFO [StoreOpener-3c138aef98ea77ba30e0a6fb902c0f4d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 3c138aef98ea77ba30e0a6fb902c0f4d
2024-12-04T06:53:34,415 INFO [StoreOpener-3c138aef98ea77ba30e0a6fb902c0f4d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3c138aef98ea77ba30e0a6fb902c0f4d columnFamilyName info
2024-12-04T06:53:34,415 DEBUG [StoreOpener-3c138aef98ea77ba30e0a6fb902c0f4d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-04T06:53:34,415 INFO [StoreOpener-3c138aef98ea77ba30e0a6fb902c0f4d-1 {}] regionserver.HStore(327): Store=3c138aef98ea77ba30e0a6fb902c0f4d/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-04T06:53:34,416 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 3c138aef98ea77ba30e0a6fb902c0f4d
2024-12-04T06:53:34,416 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d
2024-12-04T06:53:34,416 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d
2024-12-04T06:53:34,417 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 3c138aef98ea77ba30e0a6fb902c0f4d
2024-12-04T06:53:34,417 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 3c138aef98ea77ba30e0a6fb902c0f4d
2024-12-04T06:53:34,418 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 3c138aef98ea77ba30e0a6fb902c0f4d
2024-12-04T06:53:34,420 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-04T06:53:34,420 INFO [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 3c138aef98ea77ba30e0a6fb902c0f4d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=867907, jitterRate=0.10360102355480194}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-04T06:53:34,420 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3c138aef98ea77ba30e0a6fb902c0f4d
2024-12-04T06:53:34,421 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 3c138aef98ea77ba30e0a6fb902c0f4d: Running coprocessor pre-open hook at 1733295214413Writing region info on filesystem at 1733295214413Initializing all the Stores at 1733295214413Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295214413Cleaning up temporary data from old regions at 1733295214417 (+4 ms)Running coprocessor post-open hooks at 1733295214420 (+3 ms)Region opened successfully at 1733295214421 (+1 ms)
2024-12-04T06:53:34,422 INFO [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d., pid=6, masterSystemTime=1733295214408
2024-12-04T06:53:34,424 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d.
2024-12-04T06:53:34,424 INFO [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d.
2024-12-04T06:53:34,425 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3c138aef98ea77ba30e0a6fb902c0f4d, regionState=OPEN, openSeqNum=2, regionLocation=607fd5c6574c,38689,1733295213243
2024-12-04T06:53:34,427 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3c138aef98ea77ba30e0a6fb902c0f4d, server=607fd5c6574c,38689,1733295213243 because future has completed
2024-12-04T06:53:34,431 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5
2024-12-04T06:53:34,431 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 3c138aef98ea77ba30e0a6fb902c0f4d, server=607fd5c6574c,38689,1733295213243 in 174 msec
2024-12-04T06:53:34,433 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4
2024-12-04T06:53:34,434 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=3c138aef98ea77ba30e0a6fb902c0f4d, ASSIGN in 332 msec
2024-12-04T06:53:34,435 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-04T06:53:34,435 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733295214435"}]},"ts":"1733295214435"}
2024-12-04T06:53:34,437 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta
2024-12-04T06:53:34,439 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION
2024-12-04T06:53:34,441 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 367 msec
2024-12-04T06:53:35,090 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:35,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:36,090 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:36,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:37,091 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:37,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:38,092 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:38,265 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-04T06:53:38,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:38,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:38,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:38,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:38,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:38,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:38,284 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:38,284 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:38,284 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:38,285 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:38,285 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:38,285 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:38,289 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:38,289 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:38,289 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:38,292 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:53:38,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:39,092 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:39,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:39,487 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-04T06:53:39,487 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-12-04T06:53:40,093 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:40,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:41,093 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:41,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:42,094 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:42,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-04T06:53:42,763 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-12-04T06:53:42,763 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-12-04T06:53:42,763 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-04T06:53:42,763 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-12-04T06:53:42,764 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-12-04T06:53:42,764 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-12-04T06:53:42,764 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-04T06:53:42,764 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer
2024-12-04T06:53:43,094 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:43,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:44,095 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-04T06:53:44,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39693 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-12-04T06:53:44,098 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-04T06:53:44,098 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100
2024-12-04T06:53:44,101 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-04T06:53:44,101 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d.
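For context, the CreateTableProcedure that just finished (pid=4) is what an ordinary Admin-side table creation produces. Below is a minimal, illustrative Java sketch, not taken from this test's source, of creating a table shaped like the one journaled above: a single 'info' family with one version, a ROW bloom filter and 64 KB blocks. The configuration and connection setup are assumptions.

```java
// Illustrative sketch only; assumes hbase-site.xml is on the classpath.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName name = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The master runs a CreateTableProcedure (like pid=4 above) for this call.
      admin.createTable(TableDescriptorBuilder.newBuilder(name)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)                 // VERSIONS => '1'
              .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
              .setBlocksize(64 * 1024)           // BLOCKSIZE => '65536 B (64KB)'
              .build())
          .build());
    }
  }
}
```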
2024-12-04T06:53:44,104 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d., hostname=607fd5c6574c,38689,1733295213243, seqNum=2]
2024-12-04T06:53:44,110 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39693 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-04T06:53:44,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39693 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-04T06:53:44,117 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-04T06:53:44,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39693 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-12-04T06:53:44,118 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-04T06:53:44,120 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-04T06:53:44,280 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38689 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-12-04T06:53:44,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d.
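The location lookup for row 'row0001' and the flush request accepted by the master above correspond to two client-side calls: resolving the region for a row and asking the Admin to flush the table (which fans out as FlushTableProcedure pid=7 → FlushRegionProcedure pid=8). A hedged sketch of those calls, assuming an already-open Connection; the names here are illustrative, not from the test source.

```java
// Hedged client-side sketch; Connection setup is assumed.
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public final class LocateAndFlushSketch {
  static void locateAndFlush(Connection conn) throws Exception {
    TableName name = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    try (RegionLocator locator = conn.getRegionLocator(name);
         Admin admin = conn.getAdmin()) {
      // Resolves to the single region hosted on 607fd5c6574c,38689,... (seqNum=2 above).
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("row0001"));
      System.out.println(loc.getRegion().getRegionNameAsString() + " @ " + loc.getServerName());
      // Triggers a table flush on the master, like the FlushTableProcedure logged above.
      admin.flush(name);
    }
  }
}
```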
2024-12-04T06:53:44,281 INFO [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 3c138aef98ea77ba30e0a6fb902c0f4d 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-04T06:53:44,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/.tmp/info/4dcabfb393cc4e73a23549ca5732f91d is 1080, key is row0001/info:/1733295224105/Put/seqid=0
2024-12-04T06:53:44,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44993 is added to blk_1073741837_1013 (size=6033)
2024-12-04T06:53:44,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39183 is added to blk_1073741837_1013 (size=6033)
2024-12-04T06:53:44,303 INFO [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/.tmp/info/4dcabfb393cc4e73a23549ca5732f91d
2024-12-04T06:53:44,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/.tmp/info/4dcabfb393cc4e73a23549ca5732f91d as hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/info/4dcabfb393cc4e73a23549ca5732f91d
2024-12-04T06:53:44,314 INFO [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/info/4dcabfb393cc4e73a23549ca5732f91d, entries=1, sequenceid=5, filesize=5.9 K
2024-12-04T06:53:44,315 INFO [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 3c138aef98ea77ba30e0a6fb902c0f4d in 34ms, sequenceid=5, compaction requested=false
2024-12-04T06:53:44,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 3c138aef98ea77ba30e0a6fb902c0f4d:
2024-12-04T06:53:44,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d.
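The "Failed invocation ... Filesystem closed" warnings that resume below come from RecoverLeaseFSUtils polling an old WAL file roughly once per second; as the stack traces show, it ends up on DistributedFileSystem.recoverLease/isFileClosed, and the IOException only means the DFS client behind that path has already been shut down. A simplified sketch of that polling pattern follows; it is an approximation of the retry loop, not the utility's actual code (which invokes isFileClosed reflectively and adds backoff and timeouts).

```java
// Simplified sketch of WAL lease-recovery polling against HDFS.
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  // Returns once the NameNode reports the file closed; isFileClosed() throws
  // IOException("Filesystem closed") if the DFS client was already shut down,
  // which is exactly the repeated failure in the warnings above.
  static void waitUntilClosed(DistributedFileSystem dfs, Path oldWal) throws Exception {
    boolean closed = dfs.recoverLease(oldWal);  // ask the NameNode to recover the writer's lease
    while (!closed) {
      Thread.sleep(1000L);                      // roughly the once-per-second cadence seen in the log
      closed = dfs.isFileClosed(oldWal);        // poll until the file is fully closed
    }
  }
}
```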
2024-12-04T06:53:44,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-04T06:53:44,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39693 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-04T06:53:44,323 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-04T06:53:44,323 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 200 msec 2024-12-04T06:53:44,325 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 211 msec 2024-12-04T06:53:44,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:53:45,095 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:45,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:46,096 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:53:46,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:47,097 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:47,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:53:48,097 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:48,327 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:49,098 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:53:49,327 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:50,098 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:50,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:53:51,099 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:51,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:52,100 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:53:52,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:53,100 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:53,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:53:54,101 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:53:54,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39693 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-04T06:53:54,208 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-04T06:53:54,211 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39693 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-04T06:53:54,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39693 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-04T06:53:54,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39693 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-12-04T06:53:54,213 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-04T06:53:54,214 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T06:53:54,214 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T06:53:54,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:54,367 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38689 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-12-04T06:53:54,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d. 2024-12-04T06:53:54,368 INFO [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 3c138aef98ea77ba30e0a6fb902c0f4d 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-04T06:53:54,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/.tmp/info/51699765c8724d0ebe83fbe0b57e15d4 is 1080, key is row0002/info:/1733295234209/Put/seqid=0 2024-12-04T06:53:54,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39183 is added to blk_1073741838_1014 (size=6033) 2024-12-04T06:53:54,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44993 is added to blk_1073741838_1014 (size=6033) 2024-12-04T06:53:54,379 INFO [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/.tmp/info/51699765c8724d0ebe83fbe0b57e15d4 2024-12-04T06:53:54,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/.tmp/info/51699765c8724d0ebe83fbe0b57e15d4 as hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/info/51699765c8724d0ebe83fbe0b57e15d4 2024-12-04T06:53:54,392 INFO [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/info/51699765c8724d0ebe83fbe0b57e15d4, entries=1, sequenceid=9, filesize=5.9 K 2024-12-04T06:53:54,393 INFO [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 3c138aef98ea77ba30e0a6fb902c0f4d in 25ms, sequenceid=9, compaction requested=false 2024-12-04T06:53:54,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 3c138aef98ea77ba30e0a6fb902c0f4d: 2024-12-04T06:53:54,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d. 2024-12-04T06:53:54,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-12-04T06:53:54,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39693 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-12-04T06:53:54,397 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-12-04T06:53:54,397 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 181 msec 2024-12-04T06:53:54,400 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 187 msec 2024-12-04T06:53:55,101 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:55,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:53:56,102 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:56,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:57,103 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:53:57,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:58,103 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:58,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:53:59,104 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:53:59,104 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 after 68048ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:53:59,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:53:59,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta after 68040ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T06:54:00,105 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:00,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:54:01,105 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:01,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:02,106 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:54:02,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:03,106 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:03,170 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-04T06:54:03,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:04,107 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:54:04,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39693 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-12-04T06:54:04,298 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-04T06:54:04,301 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C38689%2C1733295213243.1733295244301 2024-12-04T06:54:04,307 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:54:04,308 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:54:04,308 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:54:04,308 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:54:04,308 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:54:04,308 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/WALs/607fd5c6574c,38689,1733295213243/607fd5c6574c%2C38689%2C1733295213243.1733295213625 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/WALs/607fd5c6574c,38689,1733295213243/607fd5c6574c%2C38689%2C1733295213243.1733295244301 2024-12-04T06:54:04,309 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45059:45059),(127.0.0.1/127.0.0.1:42445:42445)] 2024-12-04T06:54:04,309 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/WALs/607fd5c6574c,38689,1733295213243/607fd5c6574c%2C38689%2C1733295213243.1733295213625 is not closed yet, will try archiving it next time 2024-12-04T06:54:04,310 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39693 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-04T06:54:04,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44993 is added to blk_1073741833_1009 (size=5546) 2024-12-04T06:54:04,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39693 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-04T06:54:04,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39693 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-12-04T06:54:04,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39183 is added to blk_1073741833_1009 (size=5546) 2024-12-04T06:54:04,314 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-04T06:54:04,315 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 
2024-12-04T06:54:04,315 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T06:54:04,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:04,468 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38689 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-12-04T06:54:04,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d. 
2024-12-04T06:54:04,469 INFO [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 3c138aef98ea77ba30e0a6fb902c0f4d 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-04T06:54:04,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/.tmp/info/9a8d93017ca94ad0a1b203cab740361d is 1080, key is row0003/info:/1733295244299/Put/seqid=0 2024-12-04T06:54:04,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39183 is added to blk_1073741840_1016 (size=6033) 2024-12-04T06:54:04,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44993 is added to blk_1073741840_1016 (size=6033) 2024-12-04T06:54:04,486 INFO [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/.tmp/info/9a8d93017ca94ad0a1b203cab740361d 2024-12-04T06:54:04,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/.tmp/info/9a8d93017ca94ad0a1b203cab740361d as hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/info/9a8d93017ca94ad0a1b203cab740361d 2024-12-04T06:54:04,499 INFO [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/info/9a8d93017ca94ad0a1b203cab740361d, entries=1, sequenceid=13, filesize=5.9 K 2024-12-04T06:54:04,500 INFO [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 3c138aef98ea77ba30e0a6fb902c0f4d in 31ms, sequenceid=13, compaction requested=true 2024-12-04T06:54:04,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 3c138aef98ea77ba30e0a6fb902c0f4d: 2024-12-04T06:54:04,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d. 
2024-12-04T06:54:04,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-12-04T06:54:04,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39693 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-12-04T06:54:04,505 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-12-04T06:54:04,505 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 187 msec 2024-12-04T06:54:04,508 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 196 msec 2024-12-04T06:54:05,108 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:54:05,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:06,109 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:06,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:54:07,109 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:07,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:08,110 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:54:08,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:09,110 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:09,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:54:10,111 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:10,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:11,112 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:54:11,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:12,112 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:12,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:54:13,113 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:13,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:14,060 INFO [master/607fd5c6574c:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-04T06:54:14,061 INFO [master/607fd5c6574c:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-04T06:54:14,113 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:14,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:54:14,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39693 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-12-04T06:54:14,379 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-04T06:54:14,379 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T06:54:14,380 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T06:54:14,380 DEBUG [Time-limited test {}] regionserver.HStore(1541): 3c138aef98ea77ba30e0a6fb902c0f4d/info is initiating minor compaction (all files) 2024-12-04T06:54:14,380 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-04T06:54:14,381 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T06:54:14,381 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 3c138aef98ea77ba30e0a6fb902c0f4d/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d. 2024-12-04T06:54:14,381 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/info/4dcabfb393cc4e73a23549ca5732f91d, hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/info/51699765c8724d0ebe83fbe0b57e15d4, hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/info/9a8d93017ca94ad0a1b203cab740361d] into tmpdir=hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/.tmp, totalSize=17.7 K 2024-12-04T06:54:14,381 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 4dcabfb393cc4e73a23549ca5732f91d, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1733295224105 2024-12-04T06:54:14,382 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 51699765c8724d0ebe83fbe0b57e15d4, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1733295234209 2024-12-04T06:54:14,382 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 9a8d93017ca94ad0a1b203cab740361d, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733295244299 2024-12-04T06:54:14,398 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 3c138aef98ea77ba30e0a6fb902c0f4d#info#compaction#43 average throughput is unlimited, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T06:54:14,399 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/.tmp/info/21f469e1a61e441b98383d707389db14 is 1080, key is row0001/info:/1733295224105/Put/seqid=0 2024-12-04T06:54:14,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39183 is added to blk_1073741841_1017 (size=8296) 2024-12-04T06:54:14,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44993 is added to blk_1073741841_1017 (size=8296) 2024-12-04T06:54:14,422 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/.tmp/info/21f469e1a61e441b98383d707389db14 as hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/info/21f469e1a61e441b98383d707389db14 2024-12-04T06:54:14,430 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 3c138aef98ea77ba30e0a6fb902c0f4d/info of 3c138aef98ea77ba30e0a6fb902c0f4d into 21f469e1a61e441b98383d707389db14(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T06:54:14,430 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 3c138aef98ea77ba30e0a6fb902c0f4d: 2024-12-04T06:54:14,433 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C38689%2C1733295213243.1733295254433 2024-12-04T06:54:14,441 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:54:14,442 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:54:14,442 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:54:14,442 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:54:14,442 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:54:14,442 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/WALs/607fd5c6574c,38689,1733295213243/607fd5c6574c%2C38689%2C1733295213243.1733295244301 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/WALs/607fd5c6574c,38689,1733295213243/607fd5c6574c%2C38689%2C1733295213243.1733295254433 2024-12-04T06:54:14,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44993 is added to blk_1073741839_1015 (size=2520) 2024-12-04T06:54:14,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39183 is added to blk_1073741839_1015 (size=2520) 2024-12-04T06:54:14,453 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/WALs/607fd5c6574c,38689,1733295213243/607fd5c6574c%2C38689%2C1733295213243.1733295213625 to 
hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/oldWALs/607fd5c6574c%2C38689%2C1733295213243.1733295213625 2024-12-04T06:54:14,460 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45059:45059),(127.0.0.1/127.0.0.1:42445:42445)] 2024-12-04T06:54:14,461 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39693 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-04T06:54:14,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39693 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-04T06:54:14,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39693 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-12-04T06:54:14,464 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-04T06:54:14,465 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T06:54:14,465 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T06:54:14,619 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38689 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-12-04T06:54:14,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d. 
2024-12-04T06:54:14,619 INFO [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 3c138aef98ea77ba30e0a6fb902c0f4d 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-04T06:54:14,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/.tmp/info/c8a3bed1cee14ac1a68ba9100aa6b848 is 1080, key is row0000/info:/1733295254431/Put/seqid=0 2024-12-04T06:54:14,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44993 is added to blk_1073741843_1019 (size=6033) 2024-12-04T06:54:14,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39183 is added to blk_1073741843_1019 (size=6033) 2024-12-04T06:54:14,630 INFO [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/.tmp/info/c8a3bed1cee14ac1a68ba9100aa6b848 2024-12-04T06:54:14,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/.tmp/info/c8a3bed1cee14ac1a68ba9100aa6b848 as hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/info/c8a3bed1cee14ac1a68ba9100aa6b848 2024-12-04T06:54:14,643 INFO [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/info/c8a3bed1cee14ac1a68ba9100aa6b848, entries=1, sequenceid=18, filesize=5.9 K 2024-12-04T06:54:14,644 INFO [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 3c138aef98ea77ba30e0a6fb902c0f4d in 25ms, sequenceid=18, compaction requested=false 2024-12-04T06:54:14,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 3c138aef98ea77ba30e0a6fb902c0f4d: 2024-12-04T06:54:14,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d. 
2024-12-04T06:54:14,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-12-04T06:54:14,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39693 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-12-04T06:54:14,649 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-12-04T06:54:14,649 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 181 msec 2024-12-04T06:54:14,652 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 190 msec 2024-12-04T06:54:15,114 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:54:15,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:16,115 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:16,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:54:17,115 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:17,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:18,116 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:54:18,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:19,117 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:19,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:54:19,412 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 3c138aef98ea77ba30e0a6fb902c0f4d, had cached 0 bytes from a total of 14329 2024-12-04T06:54:20,117 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:20,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:21,118 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:21,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:22,118 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:22,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:23,119 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:23,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:24,120 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:24,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:54:24,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39693 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-12-04T06:54:24,529 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-04T06:54:24,533 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C38689%2C1733295213243.1733295264533 2024-12-04T06:54:24,540 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:54:24,540 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:54:24,540 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:54:24,540 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:54:24,540 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:54:24,540 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/WALs/607fd5c6574c,38689,1733295213243/607fd5c6574c%2C38689%2C1733295213243.1733295254433 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/WALs/607fd5c6574c,38689,1733295213243/607fd5c6574c%2C38689%2C1733295213243.1733295264533 2024-12-04T06:54:24,541 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45059:45059),(127.0.0.1/127.0.0.1:42445:42445)] 2024-12-04T06:54:24,541 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/WALs/607fd5c6574c,38689,1733295213243/607fd5c6574c%2C38689%2C1733295213243.1733295254433 is not closed yet, will try archiving it next time 2024-12-04T06:54:24,542 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/WALs/607fd5c6574c,38689,1733295213243/607fd5c6574c%2C38689%2C1733295213243.1733295244301 to hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/oldWALs/607fd5c6574c%2C38689%2C1733295213243.1733295244301 2024-12-04T06:54:24,542 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-04T06:54:24,542 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-04T06:54:24,542 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T06:54:24,542 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:54:24,542 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:54:24,542 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-04T06:54:24,542 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=597330653, stopped=false 2024-12-04T06:54:24,542 
INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=607fd5c6574c,39693,1733295213186 2024-12-04T06:54:24,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44993 is added to blk_1073741842_1018 (size=2026) 2024-12-04T06:54:24,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39183 is added to blk_1073741842_1018 (size=2026) 2024-12-04T06:54:24,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39693-0x1017c3ff5040000, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T06:54:24,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38689-0x1017c3ff5040001, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T06:54:24,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39693-0x1017c3ff5040000, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:54:24,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38689-0x1017c3ff5040001, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:54:24,545 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-04T06:54:24,545 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-04T06:54:24,545 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38689-0x1017c3ff5040001, quorum=127.0.0.1:55528, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T06:54:24,545 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-04T06:54:24,545 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39693-0x1017c3ff5040000, quorum=127.0.0.1:55528, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T06:54:24,545 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T06:54:24,545 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:54:24,545 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '607fd5c6574c,38689,1733295213243' ***** 2024-12-04T06:54:24,545 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-04T06:54:24,546 INFO [RS:0;607fd5c6574c:38689 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-04T06:54:24,546 INFO [RS:0;607fd5c6574c:38689 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-04T06:54:24,546 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-04T06:54:24,546 INFO [RS:0;607fd5c6574c:38689 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-04T06:54:24,546 INFO [RS:0;607fd5c6574c:38689 {}] regionserver.HRegionServer(3091): Received CLOSE for 3c138aef98ea77ba30e0a6fb902c0f4d 2024-12-04T06:54:24,546 INFO [RS:0;607fd5c6574c:38689 {}] regionserver.HRegionServer(959): stopping server 607fd5c6574c,38689,1733295213243 2024-12-04T06:54:24,547 INFO [RS:0;607fd5c6574c:38689 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T06:54:24,547 INFO [RS:0;607fd5c6574c:38689 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;607fd5c6574c:38689. 2024-12-04T06:54:24,547 DEBUG [RS:0;607fd5c6574c:38689 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T06:54:24,547 DEBUG [RS:0;607fd5c6574c:38689 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:54:24,547 INFO [RS:0;607fd5c6574c:38689 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-04T06:54:24,547 INFO [RS:0;607fd5c6574c:38689 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-04T06:54:24,547 INFO [RS:0;607fd5c6574c:38689 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-04T06:54:24,547 INFO [RS:0;607fd5c6574c:38689 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-04T06:54:24,547 INFO [RS:0;607fd5c6574c:38689 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-04T06:54:24,547 DEBUG [RS:0;607fd5c6574c:38689 {}] regionserver.HRegionServer(1325): Online Regions={3c138aef98ea77ba30e0a6fb902c0f4d=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d., 1588230740=hbase:meta,,1.1588230740} 2024-12-04T06:54:24,547 DEBUG [RS:0;607fd5c6574c:38689 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 3c138aef98ea77ba30e0a6fb902c0f4d 2024-12-04T06:54:24,547 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-04T06:54:24,548 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-04T06:54:24,548 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-04T06:54:24,548 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-04T06:54:24,548 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 3c138aef98ea77ba30e0a6fb902c0f4d, disabling compactions & flushes 2024-12-04T06:54:24,548 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-04T06:54:24,548 INFO [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d. 2024-12-04T06:54:24,548 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d. 2024-12-04T06:54:24,548 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d. after waiting 0 ms 2024-12-04T06:54:24,548 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-12-04T06:54:24,548 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d. 
2024-12-04T06:54:24,548 INFO [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 3c138aef98ea77ba30e0a6fb902c0f4d 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-04T06:54:24,554 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/.tmp/info/cd9b39a95dba420a94ef20c4ad0a0ff7 is 1080, key is row0001/info:/1733295264531/Put/seqid=0 2024-12-04T06:54:24,572 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/hbase/meta/1588230740/.tmp/info/b9502dee919c4687a9c628b76cf50862 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d./info:regioninfo/1733295214425/Put/seqid=0 2024-12-04T06:54:24,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39183 is added to blk_1073741845_1021 (size=6033) 2024-12-04T06:54:24,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44993 is added to blk_1073741845_1021 (size=6033) 2024-12-04T06:54:24,576 INFO [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/.tmp/info/cd9b39a95dba420a94ef20c4ad0a0ff7 2024-12-04T06:54:24,589 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/.tmp/info/cd9b39a95dba420a94ef20c4ad0a0ff7 as hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/info/cd9b39a95dba420a94ef20c4ad0a0ff7 2024-12-04T06:54:24,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39183 is added to blk_1073741846_1022 (size=7308) 2024-12-04T06:54:24,592 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/hbase/meta/1588230740/.tmp/info/b9502dee919c4687a9c628b76cf50862 2024-12-04T06:54:24,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44993 is added to blk_1073741846_1022 (size=7308) 2024-12-04T06:54:24,602 INFO [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/info/cd9b39a95dba420a94ef20c4ad0a0ff7, entries=1, sequenceid=22, filesize=5.9 K 2024-12-04T06:54:24,607 INFO [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 3c138aef98ea77ba30e0a6fb902c0f4d in 59ms, sequenceid=22, compaction requested=true 2024-12-04T06:54:24,608 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/info/4dcabfb393cc4e73a23549ca5732f91d, hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/info/51699765c8724d0ebe83fbe0b57e15d4, hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/info/9a8d93017ca94ad0a1b203cab740361d] to archive 2024-12-04T06:54:24,610 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-04T06:54:24,612 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/info/4dcabfb393cc4e73a23549ca5732f91d to hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/info/4dcabfb393cc4e73a23549ca5732f91d 2024-12-04T06:54:24,614 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/info/51699765c8724d0ebe83fbe0b57e15d4 to hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/info/51699765c8724d0ebe83fbe0b57e15d4 2024-12-04T06:54:24,616 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/info/9a8d93017ca94ad0a1b203cab740361d to 
hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/info/9a8d93017ca94ad0a1b203cab740361d 2024-12-04T06:54:24,616 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=607fd5c6574c:39693 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] 
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-12-04T06:54:24,617 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [4dcabfb393cc4e73a23549ca5732f91d=6033, 51699765c8724d0ebe83fbe0b57e15d4=6033, 9a8d93017ca94ad0a1b203cab740361d=6033] 2024-12-04T06:54:24,617 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/hbase/meta/1588230740/.tmp/ns/72e6ecdc59d94d468085760928e7a883 is 43, key is default/ns:d/1733295214045/Put/seqid=0 2024-12-04T06:54:24,625 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3c138aef98ea77ba30e0a6fb902c0f4d/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-12-04T06:54:24,626 INFO [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d. 2024-12-04T06:54:24,626 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 3c138aef98ea77ba30e0a6fb902c0f4d: Waiting for close lock at 1733295264548Running coprocessor pre-close hooks at 1733295264548Disabling compacts and flushes for region at 1733295264548Disabling writes for close at 1733295264548Obtaining lock to block concurrent updates at 1733295264548Preparing flush snapshotting stores in 3c138aef98ea77ba30e0a6fb902c0f4d at 1733295264548Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1733295264548Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d. 
at 1733295264549 (+1 ms)Flushing 3c138aef98ea77ba30e0a6fb902c0f4d/info: creating writer at 1733295264549Flushing 3c138aef98ea77ba30e0a6fb902c0f4d/info: appending metadata at 1733295264553 (+4 ms)Flushing 3c138aef98ea77ba30e0a6fb902c0f4d/info: closing flushed file at 1733295264553Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7f419d6a: reopening flushed file at 1733295264587 (+34 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 3c138aef98ea77ba30e0a6fb902c0f4d in 59ms, sequenceid=22, compaction requested=true at 1733295264607 (+20 ms)Writing region close event to WAL at 1733295264620 (+13 ms)Running coprocessor post-close hooks at 1733295264626 (+6 ms)Closed at 1733295264626 2024-12-04T06:54:24,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44993 is added to blk_1073741847_1023 (size=5153) 2024-12-04T06:54:24,626 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733295214072.3c138aef98ea77ba30e0a6fb902c0f4d. 2024-12-04T06:54:24,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39183 is added to blk_1073741847_1023 (size=5153) 2024-12-04T06:54:24,627 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/hbase/meta/1588230740/.tmp/ns/72e6ecdc59d94d468085760928e7a883 2024-12-04T06:54:24,652 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/hbase/meta/1588230740/.tmp/table/d3f8a7693f4647bc9e3bf73aa44722f4 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1733295214435/Put/seqid=0 2024-12-04T06:54:24,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39183 is added to blk_1073741848_1024 (size=5508) 2024-12-04T06:54:24,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44993 is added to blk_1073741848_1024 (size=5508) 2024-12-04T06:54:24,661 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/hbase/meta/1588230740/.tmp/table/d3f8a7693f4647bc9e3bf73aa44722f4 2024-12-04T06:54:24,667 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/hbase/meta/1588230740/.tmp/info/b9502dee919c4687a9c628b76cf50862 as hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/hbase/meta/1588230740/info/b9502dee919c4687a9c628b76cf50862 2024-12-04T06:54:24,673 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/hbase/meta/1588230740/info/b9502dee919c4687a9c628b76cf50862, entries=10, sequenceid=11, filesize=7.1 K 2024-12-04T06:54:24,674 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/hbase/meta/1588230740/.tmp/ns/72e6ecdc59d94d468085760928e7a883 as hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/hbase/meta/1588230740/ns/72e6ecdc59d94d468085760928e7a883 2024-12-04T06:54:24,680 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/hbase/meta/1588230740/ns/72e6ecdc59d94d468085760928e7a883, entries=2, sequenceid=11, filesize=5.0 K 2024-12-04T06:54:24,681 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/hbase/meta/1588230740/.tmp/table/d3f8a7693f4647bc9e3bf73aa44722f4 as hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/hbase/meta/1588230740/table/d3f8a7693f4647bc9e3bf73aa44722f4 2024-12-04T06:54:24,689 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/hbase/meta/1588230740/table/d3f8a7693f4647bc9e3bf73aa44722f4, entries=2, sequenceid=11, filesize=5.4 K 2024-12-04T06:54:24,690 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 142ms, sequenceid=11, compaction requested=false 2024-12-04T06:54:24,695 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-04T06:54:24,695 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-04T06:54:24,695 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-04T06:54:24,695 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733295264547Running coprocessor pre-close hooks at 1733295264547Disabling compacts and flushes for region at 1733295264547Disabling writes for close at 1733295264548 (+1 ms)Obtaining lock to block concurrent updates at 1733295264548Preparing flush snapshotting stores in 1588230740 at 1733295264548Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1733295264549 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733295264549Flushing 1588230740/info: creating writer at 
1733295264549Flushing 1588230740/info: appending metadata at 1733295264571 (+22 ms)Flushing 1588230740/info: closing flushed file at 1733295264571Flushing 1588230740/ns: creating writer at 1733295264599 (+28 ms)Flushing 1588230740/ns: appending metadata at 1733295264617 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1733295264617Flushing 1588230740/table: creating writer at 1733295264633 (+16 ms)Flushing 1588230740/table: appending metadata at 1733295264651 (+18 ms)Flushing 1588230740/table: closing flushed file at 1733295264651Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1e60efe0: reopening flushed file at 1733295264666 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4959c50: reopening flushed file at 1733295264673 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@20dfbc7a: reopening flushed file at 1733295264680 (+7 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 142ms, sequenceid=11, compaction requested=false at 1733295264690 (+10 ms)Writing region close event to WAL at 1733295264692 (+2 ms)Running coprocessor post-close hooks at 1733295264695 (+3 ms)Closed at 1733295264695 2024-12-04T06:54:24,696 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-04T06:54:24,747 INFO [RS:0;607fd5c6574c:38689 {}] regionserver.HRegionServer(976): stopping server 607fd5c6574c,38689,1733295213243; all regions closed. 2024-12-04T06:54:24,748 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:54:24,748 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:54:24,748 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:54:24,748 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:54:24,748 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:54:24,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44993 is added to blk_1073741834_1010 (size=3306) 2024-12-04T06:54:24,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39183 is added to blk_1073741834_1010 (size=3306) 2024-12-04T06:54:24,754 DEBUG [RS:0;607fd5c6574c:38689 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/oldWALs 2024-12-04T06:54:24,754 INFO [RS:0;607fd5c6574c:38689 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 607fd5c6574c%2C38689%2C1733295213243.meta:.meta(num 1733295214003) 2024-12-04T06:54:24,755 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:54:24,755 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:54:24,755 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:54:24,755 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:54:24,755 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:54:24,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44993 is added to blk_1073741844_1020 (size=1252) 2024-12-04T06:54:24,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39183 is added to blk_1073741844_1020 (size=1252) 2024-12-04T06:54:24,760 DEBUG [RS:0;607fd5c6574c:38689 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to 
/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/oldWALs 2024-12-04T06:54:24,760 INFO [RS:0;607fd5c6574c:38689 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 607fd5c6574c%2C38689%2C1733295213243:(num 1733295264533) 2024-12-04T06:54:24,760 DEBUG [RS:0;607fd5c6574c:38689 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:54:24,760 INFO [RS:0;607fd5c6574c:38689 {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T06:54:24,760 INFO [RS:0;607fd5c6574c:38689 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T06:54:24,761 INFO [RS:0;607fd5c6574c:38689 {}] hbase.ChoreService(370): Chore service for: regionserver/607fd5c6574c:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-04T06:54:24,761 INFO [RS:0;607fd5c6574c:38689 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T06:54:24,761 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-04T06:54:24,761 INFO [RS:0;607fd5c6574c:38689 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38689 2024-12-04T06:54:24,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39693-0x1017c3ff5040000, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T06:54:24,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38689-0x1017c3ff5040001, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/607fd5c6574c,38689,1733295213243 2024-12-04T06:54:24,763 INFO [RS:0;607fd5c6574c:38689 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T06:54:24,764 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [607fd5c6574c,38689,1733295213243] 2024-12-04T06:54:24,767 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/607fd5c6574c,38689,1733295213243 already deleted, retry=false 2024-12-04T06:54:24,767 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 607fd5c6574c,38689,1733295213243 expired; onlineServers=0 2024-12-04T06:54:24,767 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '607fd5c6574c,39693,1733295213186' ***** 2024-12-04T06:54:24,767 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-04T06:54:24,767 INFO [M:0;607fd5c6574c:39693 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T06:54:24,767 INFO [M:0;607fd5c6574c:39693 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T06:54:24,767 DEBUG [M:0;607fd5c6574c:39693 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-04T06:54:24,767 DEBUG [M:0;607fd5c6574c:39693 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-04T06:54:24,767 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-04T06:54:24,767 DEBUG [master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.large.0-1733295213402 {}] cleaner.HFileCleaner(306): Exit Thread[master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.large.0-1733295213402,5,FailOnTimeoutGroup] 2024-12-04T06:54:24,767 DEBUG [master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.small.0-1733295213402 {}] cleaner.HFileCleaner(306): Exit Thread[master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.small.0-1733295213402,5,FailOnTimeoutGroup] 2024-12-04T06:54:24,768 INFO [M:0;607fd5c6574c:39693 {}] hbase.ChoreService(370): Chore service for: master/607fd5c6574c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-04T06:54:24,768 INFO [M:0;607fd5c6574c:39693 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T06:54:24,768 DEBUG [M:0;607fd5c6574c:39693 {}] master.HMaster(1795): Stopping service threads 2024-12-04T06:54:24,768 INFO [M:0;607fd5c6574c:39693 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-04T06:54:24,768 INFO [M:0;607fd5c6574c:39693 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-04T06:54:24,768 INFO [M:0;607fd5c6574c:39693 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-04T06:54:24,768 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-04T06:54:24,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39693-0x1017c3ff5040000, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-04T06:54:24,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39693-0x1017c3ff5040000, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:54:24,770 DEBUG [M:0;607fd5c6574c:39693 {}] zookeeper.ZKUtil(347): master:39693-0x1017c3ff5040000, quorum=127.0.0.1:55528, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-04T06:54:24,770 WARN [M:0;607fd5c6574c:39693 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-04T06:54:24,770 INFO [M:0;607fd5c6574c:39693 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/.lastflushedseqids 2024-12-04T06:54:24,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44993 is added to blk_1073741849_1025 (size=130) 2024-12-04T06:54:24,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39183 is added to blk_1073741849_1025 (size=130) 2024-12-04T06:54:24,784 INFO [M:0;607fd5c6574c:39693 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-04T06:54:24,784 INFO [M:0;607fd5c6574c:39693 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-04T06:54:24,784 DEBUG [M:0;607fd5c6574c:39693 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-04T06:54:24,784 INFO [M:0;607fd5c6574c:39693 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:54:24,784 DEBUG [M:0;607fd5c6574c:39693 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:54:24,784 DEBUG [M:0;607fd5c6574c:39693 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-04T06:54:24,784 DEBUG [M:0;607fd5c6574c:39693 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:54:24,784 INFO [M:0;607fd5c6574c:39693 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.57 KB heapSize=54.98 KB 2024-12-04T06:54:24,809 DEBUG [M:0;607fd5c6574c:39693 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4425b89a1f2c4b5988a5de4e554018e1 is 82, key is hbase:meta,,1/info:regioninfo/1733295214027/Put/seqid=0 2024-12-04T06:54:24,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39183 is added to blk_1073741850_1026 (size=5672) 2024-12-04T06:54:24,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44993 is added to blk_1073741850_1026 (size=5672) 2024-12-04T06:54:24,819 INFO [M:0;607fd5c6574c:39693 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4425b89a1f2c4b5988a5de4e554018e1 2024-12-04T06:54:24,843 DEBUG [M:0;607fd5c6574c:39693 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7b802fbc8fea41928711f152dc8d6e48 is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733295214440/Put/seqid=0 2024-12-04T06:54:24,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44993 is added to blk_1073741851_1027 (size=7821) 2024-12-04T06:54:24,863 INFO [M:0;607fd5c6574c:39693 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.97 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7b802fbc8fea41928711f152dc8d6e48 2024-12-04T06:54:24,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39183 is added to blk_1073741851_1027 (size=7821) 2024-12-04T06:54:24,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38689-0x1017c3ff5040001, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T06:54:24,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38689-0x1017c3ff5040001, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T06:54:24,866 INFO [RS:0;607fd5c6574c:38689 {}] hbase.HBaseServerBase(486): Close table descriptors 
2024-12-04T06:54:24,866 INFO [RS:0;607fd5c6574c:38689 {}] regionserver.HRegionServer(1031): Exiting; stopping=607fd5c6574c,38689,1733295213243; zookeeper connection closed. 2024-12-04T06:54:24,871 INFO [M:0;607fd5c6574c:39693 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 7b802fbc8fea41928711f152dc8d6e48 2024-12-04T06:54:24,876 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1b2a83e4 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1b2a83e4 2024-12-04T06:54:24,877 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-04T06:54:24,899 DEBUG [M:0;607fd5c6574c:39693 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7d1a4d896d6043f3ad775723ad5d15d2 is 69, key is 607fd5c6574c,38689,1733295213243/rs:state/1733295213479/Put/seqid=0 2024-12-04T06:54:24,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44993 is added to blk_1073741852_1028 (size=5156) 2024-12-04T06:54:24,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39183 is added to blk_1073741852_1028 (size=5156) 2024-12-04T06:54:24,909 INFO [M:0;607fd5c6574c:39693 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7d1a4d896d6043f3ad775723ad5d15d2 2024-12-04T06:54:24,940 DEBUG [M:0;607fd5c6574c:39693 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/292fdcfe55064dddb5e6083c42f05ffd is 52, key is load_balancer_on/state:d/1733295214068/Put/seqid=0 2024-12-04T06:54:24,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44993 is added to blk_1073741853_1029 (size=5056) 2024-12-04T06:54:24,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39183 is added to blk_1073741853_1029 (size=5056) 2024-12-04T06:54:24,946 INFO [M:0;607fd5c6574c:39693 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/292fdcfe55064dddb5e6083c42f05ffd 2024-12-04T06:54:24,952 DEBUG [M:0;607fd5c6574c:39693 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4425b89a1f2c4b5988a5de4e554018e1 as hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4425b89a1f2c4b5988a5de4e554018e1 2024-12-04T06:54:24,959 INFO [M:0;607fd5c6574c:39693 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4425b89a1f2c4b5988a5de4e554018e1, entries=8, sequenceid=121, filesize=5.5 K 2024-12-04T06:54:24,960 DEBUG [M:0;607fd5c6574c:39693 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7b802fbc8fea41928711f152dc8d6e48 as hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7b802fbc8fea41928711f152dc8d6e48 2024-12-04T06:54:24,965 INFO [M:0;607fd5c6574c:39693 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 7b802fbc8fea41928711f152dc8d6e48 2024-12-04T06:54:24,965 INFO [M:0;607fd5c6574c:39693 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7b802fbc8fea41928711f152dc8d6e48, entries=14, sequenceid=121, filesize=7.6 K 2024-12-04T06:54:24,966 DEBUG [M:0;607fd5c6574c:39693 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7d1a4d896d6043f3ad775723ad5d15d2 as hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7d1a4d896d6043f3ad775723ad5d15d2 2024-12-04T06:54:24,971 INFO [M:0;607fd5c6574c:39693 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7d1a4d896d6043f3ad775723ad5d15d2, entries=1, sequenceid=121, filesize=5.0 K 2024-12-04T06:54:24,973 DEBUG [M:0;607fd5c6574c:39693 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/292fdcfe55064dddb5e6083c42f05ffd as hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/292fdcfe55064dddb5e6083c42f05ffd 2024-12-04T06:54:24,980 INFO [M:0;607fd5c6574c:39693 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40177/user/jenkins/test-data/1bbeacef-f256-670c-e263-e60cacf9a692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/292fdcfe55064dddb5e6083c42f05ffd, entries=1, sequenceid=121, filesize=4.9 K 2024-12-04T06:54:24,981 INFO [M:0;607fd5c6574c:39693 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.57 KB/44620, heapSize ~54.92 KB/56240, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 197ms, sequenceid=121, compaction requested=false 2024-12-04T06:54:24,982 INFO [M:0;607fd5c6574c:39693 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-04T06:54:24,982 DEBUG [M:0;607fd5c6574c:39693 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733295264784Disabling compacts and flushes for region at 1733295264784Disabling writes for close at 1733295264784Obtaining lock to block concurrent updates at 1733295264784Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733295264784Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44620, getHeapSize=56240, getOffHeapSize=0, getCellsCount=140 at 1733295264785 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733295264786 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733295264786Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733295264808 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733295264808Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733295264825 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733295264842 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733295264842Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733295264871 (+29 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733295264898 (+27 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733295264898Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733295264916 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733295264940 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733295264940Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2afe0061: reopening flushed file at 1733295264951 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@40c8f082: reopening flushed file at 1733295264959 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b4a7c4f: reopening flushed file at 1733295264965 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42200d10: reopening flushed file at 1733295264972 (+7 ms)Finished flush of dataSize ~43.57 KB/44620, heapSize ~54.92 KB/56240, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 197ms, sequenceid=121, compaction requested=false at 1733295264981 (+9 ms)Writing region close event to WAL at 1733295264982 (+1 ms)Closed at 1733295264982 2024-12-04T06:54:24,983 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:54:24,983 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:54:24,983 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:54:24,983 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:54:24,983 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:54:24,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39183 is added to blk_1073741830_1006 (size=53017) 2024-12-04T06:54:24,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44993 is added to blk_1073741830_1006 (size=53017) 2024-12-04T06:54:24,986 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-04T06:54:24,986 INFO [M:0;607fd5c6574c:39693 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-04T06:54:24,986 INFO [M:0;607fd5c6574c:39693 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39693 2024-12-04T06:54:24,986 INFO [M:0;607fd5c6574c:39693 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T06:54:25,089 INFO [M:0;607fd5c6574c:39693 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-04T06:54:25,089 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39693-0x1017c3ff5040000, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T06:54:25,089 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39693-0x1017c3ff5040000, quorum=127.0.0.1:55528, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T06:54:25,098 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@39212263{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:54:25,098 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@10128232{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T06:54:25,098 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T06:54:25,098 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4cf5e3df{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T06:54:25,098 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46a495b0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/hadoop.log.dir/,STOPPED} 2024-12-04T06:54:25,100 WARN [BP-1483678462-172.17.0.2-1733295212521 heartbeating to localhost/127.0.0.1:40177 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T06:54:25,100 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-04T06:54:25,100 WARN [BP-1483678462-172.17.0.2-1733295212521 heartbeating to localhost/127.0.0.1:40177 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1483678462-172.17.0.2-1733295212521 (Datanode Uuid 03209b56-a62b-4b23-8b43-952e8b483d15) service to localhost/127.0.0.1:40177 2024-12-04T06:54:25,100 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T06:54:25,101 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/cluster_46a0e604-073b-7e24-162c-13fc0a4e392a/data/data3/current/BP-1483678462-172.17.0.2-1733295212521 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:54:25,101 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/cluster_46a0e604-073b-7e24-162c-13fc0a4e392a/data/data4/current/BP-1483678462-172.17.0.2-1733295212521 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:54:25,102 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T06:54:25,105 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2ac0122b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:54:25,106 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@648e3649{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T06:54:25,106 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T06:54:25,106 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5a296252{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T06:54:25,106 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6eebb3dc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/hadoop.log.dir/,STOPPED} 2024-12-04T06:54:25,108 WARN [BP-1483678462-172.17.0.2-1733295212521 heartbeating to localhost/127.0.0.1:40177 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T06:54:25,108 WARN [BP-1483678462-172.17.0.2-1733295212521 heartbeating to localhost/127.0.0.1:40177 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1483678462-172.17.0.2-1733295212521 (Datanode Uuid 97848155-f0a3-45d7-bb2a-8af608a32d39) service to localhost/127.0.0.1:40177 2024-12-04T06:54:25,108 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-04T06:54:25,108 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T06:54:25,109 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/cluster_46a0e604-073b-7e24-162c-13fc0a4e392a/data/data1/current/BP-1483678462-172.17.0.2-1733295212521 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:54:25,109 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/cluster_46a0e604-073b-7e24-162c-13fc0a4e392a/data/data2/current/BP-1483678462-172.17.0.2-1733295212521 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:54:25,109 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T06:54:25,117 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@701842fe{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-04T06:54:25,117 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@785f0d23{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T06:54:25,117 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T06:54:25,118 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@63f38dfa{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T06:54:25,118 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62e6de45{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/hadoop.log.dir/,STOPPED} 2024-12-04T06:54:25,120 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:25,125 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-04T06:54:25,147 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-04T06:54:25,157 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=208 (was 181) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:40177 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/607fd5c6574c:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40177 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40177 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40177 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40177 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40177 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40177 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:40177 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40177 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=485 (was 457) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=258 (was 297), ProcessCount=11 (was 11), AvailableMemoryMB=5842 (was 6382) 2024-12-04T06:54:25,166 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=208, OpenFileDescriptor=485, MaxFileDescriptor=1048576, SystemLoadAverage=258, ProcessCount=11, AvailableMemoryMB=5841 2024-12-04T06:54:25,166 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-04T06:54:25,166 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/hadoop.log.dir so I do NOT create it in target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d 2024-12-04T06:54:25,166 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/acc62485-1803-af48-b138-179f3dc5d05a/hadoop.tmp.dir so I do NOT create it in target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d 2024-12-04T06:54:25,166 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/cluster_6d60fc30-8bea-1b14-0596-45cf526269fc, deleteOnExit=true 2024-12-04T06:54:25,166 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-04T06:54:25,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/test.cache.data in system properties and HBase conf 2024-12-04T06:54:25,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/hadoop.tmp.dir in system properties and HBase conf 2024-12-04T06:54:25,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/hadoop.log.dir in system properties and HBase conf 2024-12-04T06:54:25,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-04T06:54:25,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-04T06:54:25,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-04T06:54:25,167 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-04T06:54:25,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-04T06:54:25,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-04T06:54:25,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-04T06:54:25,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T06:54:25,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-04T06:54:25,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-04T06:54:25,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T06:54:25,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T06:54:25,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-04T06:54:25,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/nfs.dump.dir in system properties and HBase conf 2024-12-04T06:54:25,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/java.io.tmpdir in system properties and HBase conf 2024-12-04T06:54:25,168 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T06:54:25,168 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-04T06:54:25,168 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-04T06:54:25,182 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-04T06:54:25,266 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T06:54:25,277 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T06:54:25,279 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T06:54:25,279 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T06:54:25,279 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-04T06:54:25,279 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T06:54:25,280 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@446d84f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/hadoop.log.dir/,AVAILABLE} 2024-12-04T06:54:25,280 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@640c9e09{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T06:54:25,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:25,411 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@282647c0{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/java.io.tmpdir/jetty-localhost-42679-hadoop-hdfs-3_4_1-tests_jar-_-any-9776614116447159518/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-04T06:54:25,412 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3db1ef4a{HTTP/1.1, (http/1.1)}{localhost:42679} 2024-12-04T06:54:25,412 INFO [Time-limited test {}] server.Server(415): Started @237937ms 2024-12-04T06:54:25,426 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-04T06:54:25,493 INFO [regionserver/607fd5c6574c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T06:54:25,511 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T06:54:25,514 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T06:54:25,516 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T06:54:25,516 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T06:54:25,517 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T06:54:25,520 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a8558d3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/hadoop.log.dir/,AVAILABLE} 2024-12-04T06:54:25,521 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@33382c80{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T06:54:25,645 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6f8f17a1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/java.io.tmpdir/jetty-localhost-39927-hadoop-hdfs-3_4_1-tests_jar-_-any-14737233021596644760/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:54:25,645 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7da32d48{HTTP/1.1, (http/1.1)}{localhost:39927} 2024-12-04T06:54:25,645 INFO [Time-limited test {}] server.Server(415): Started @238170ms 2024-12-04T06:54:25,646 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-04T06:54:25,682 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T06:54:25,684 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T06:54:25,685 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T06:54:25,685 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T06:54:25,685 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-04T06:54:25,686 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@250c37c4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/hadoop.log.dir/,AVAILABLE} 2024-12-04T06:54:25,686 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@284a8a01{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T06:54:25,775 WARN [Thread-1942 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/cluster_6d60fc30-8bea-1b14-0596-45cf526269fc/data/data1/current/BP-1932486448-172.17.0.2-1733295265189/current, will proceed with Du for space computation calculation, 2024-12-04T06:54:25,775 WARN [Thread-1943 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/cluster_6d60fc30-8bea-1b14-0596-45cf526269fc/data/data2/current/BP-1932486448-172.17.0.2-1733295265189/current, will proceed with Du for space computation calculation, 2024-12-04T06:54:25,799 WARN [Thread-1921 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-04T06:54:25,804 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x75f34db346cfb361 with lease ID 0xc30bdff59c9bb2c7: Processing first storage report for DS-593b0c39-f205-4be7-96a8-756a8bd25f9c from datanode DatanodeRegistration(127.0.0.1:35313, datanodeUuid=a5cb09a8-5cf6-49d7-95c7-697f13452c95, infoPort=37635, infoSecurePort=0, ipcPort=38833, storageInfo=lv=-57;cid=testClusterID;nsid=1182198994;c=1733295265189) 2024-12-04T06:54:25,804 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x75f34db346cfb361 with lease ID 0xc30bdff59c9bb2c7: from storage DS-593b0c39-f205-4be7-96a8-756a8bd25f9c node DatanodeRegistration(127.0.0.1:35313, datanodeUuid=a5cb09a8-5cf6-49d7-95c7-697f13452c95, infoPort=37635, infoSecurePort=0, ipcPort=38833, storageInfo=lv=-57;cid=testClusterID;nsid=1182198994;c=1733295265189), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:54:25,804 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x75f34db346cfb361 with lease ID 0xc30bdff59c9bb2c7: Processing first storage report for DS-29911db1-37a9-4609-a05d-6a64a2da05c6 from datanode DatanodeRegistration(127.0.0.1:35313, datanodeUuid=a5cb09a8-5cf6-49d7-95c7-697f13452c95, infoPort=37635, infoSecurePort=0, ipcPort=38833, storageInfo=lv=-57;cid=testClusterID;nsid=1182198994;c=1733295265189) 2024-12-04T06:54:25,804 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x75f34db346cfb361 with lease ID 0xc30bdff59c9bb2c7: from storage DS-29911db1-37a9-4609-a05d-6a64a2da05c6 node DatanodeRegistration(127.0.0.1:35313, datanodeUuid=a5cb09a8-5cf6-49d7-95c7-697f13452c95, infoPort=37635, infoSecurePort=0, ipcPort=38833, storageInfo=lv=-57;cid=testClusterID;nsid=1182198994;c=1733295265189), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:54:25,843 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2735da07{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/java.io.tmpdir/jetty-localhost-41685-hadoop-hdfs-3_4_1-tests_jar-_-any-10140229943131254172/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:54:25,843 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3e770e70{HTTP/1.1, (http/1.1)}{localhost:41685} 2024-12-04T06:54:25,843 INFO [Time-limited test {}] server.Server(415): Started @238368ms 2024-12-04T06:54:25,844 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
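The storage-report and datanode web-UI entries above come from the two-DataNode HDFS mini cluster that the harness starts underneath HBase (numDataNodes=2 in the StartMiniClusterOption logged earlier). A rough sketch of standing up such a cluster directly with Hadoop's own test utility follows; the base directory and paths are assumptions for illustration, not the ones used by this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    // Sketch: start a 2-DataNode HDFS mini cluster, similar to what the test harness does.
    public class MiniDfsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Illustrative base dir; the real run derives it from target/test-data/...
        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "/tmp/minidfs-sketch");

        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(2)   // matches numDataNodes=2 in the logged StartMiniClusterOption
            .build();
        cluster.waitActive(); // block until the NameNode has processed the DataNode block reports

        FileSystem fs = cluster.getFileSystem();
        fs.mkdirs(new Path("/user/jenkins"));

        cluster.shutdown();
      }
    }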
2024-12-04T06:54:25,979 WARN [Thread-1968 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/cluster_6d60fc30-8bea-1b14-0596-45cf526269fc/data/data3/current/BP-1932486448-172.17.0.2-1733295265189/current, will proceed with Du for space computation calculation, 2024-12-04T06:54:25,979 WARN [Thread-1969 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/cluster_6d60fc30-8bea-1b14-0596-45cf526269fc/data/data4/current/BP-1932486448-172.17.0.2-1733295265189/current, will proceed with Du for space computation calculation, 2024-12-04T06:54:26,001 WARN [Thread-1957 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-04T06:54:26,004 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc5f80f441c143a0e with lease ID 0xc30bdff59c9bb2c8: Processing first storage report for DS-6757cdab-6cc1-44d6-a56a-bd549db4e90b from datanode DatanodeRegistration(127.0.0.1:43645, datanodeUuid=c135430b-62b1-46cb-8901-96a27938365a, infoPort=46187, infoSecurePort=0, ipcPort=34583, storageInfo=lv=-57;cid=testClusterID;nsid=1182198994;c=1733295265189) 2024-12-04T06:54:26,004 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc5f80f441c143a0e with lease ID 0xc30bdff59c9bb2c8: from storage DS-6757cdab-6cc1-44d6-a56a-bd549db4e90b node DatanodeRegistration(127.0.0.1:43645, datanodeUuid=c135430b-62b1-46cb-8901-96a27938365a, infoPort=46187, infoSecurePort=0, ipcPort=34583, storageInfo=lv=-57;cid=testClusterID;nsid=1182198994;c=1733295265189), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:54:26,004 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc5f80f441c143a0e with lease ID 0xc30bdff59c9bb2c8: Processing first storage report for DS-20937c50-a7b6-4907-b3f7-fee237e34965 from datanode DatanodeRegistration(127.0.0.1:43645, datanodeUuid=c135430b-62b1-46cb-8901-96a27938365a, infoPort=46187, infoSecurePort=0, ipcPort=34583, storageInfo=lv=-57;cid=testClusterID;nsid=1182198994;c=1733295265189) 2024-12-04T06:54:26,004 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc5f80f441c143a0e with lease ID 0xc30bdff59c9bb2c8: from storage DS-20937c50-a7b6-4907-b3f7-fee237e34965 node DatanodeRegistration(127.0.0.1:43645, datanodeUuid=c135430b-62b1-46cb-8901-96a27938365a, infoPort=46187, infoSecurePort=0, ipcPort=34583, storageInfo=lv=-57;cid=testClusterID;nsid=1182198994;c=1733295265189), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:54:26,088 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d 2024-12-04T06:54:26,092 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/cluster_6d60fc30-8bea-1b14-0596-45cf526269fc/zookeeper_0, clientPort=60571, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/cluster_6d60fc30-8bea-1b14-0596-45cf526269fc/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/cluster_6d60fc30-8bea-1b14-0596-45cf526269fc/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-04T06:54:26,093 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60571 2024-12-04T06:54:26,094 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:54:26,095 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:54:26,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741825_1001 (size=7) 2024-12-04T06:54:26,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741825_1001 (size=7) 2024-12-04T06:54:26,105 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0 with version=8 2024-12-04T06:54:26,105 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/hbase-staging 2024-12-04T06:54:26,107 INFO [Time-limited test {}] client.ConnectionUtils(128): master/607fd5c6574c:0 server-side Connection retries=45 2024-12-04T06:54:26,107 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T06:54:26,107 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T06:54:26,107 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T06:54:26,108 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T06:54:26,108 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T06:54:26,108 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-04T06:54:26,108 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T06:54:26,109 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36799 2024-12-04T06:54:26,110 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36799 connecting to ZooKeeper ensemble=127.0.0.1:60571 2024-12-04T06:54:26,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:367990x0, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T06:54:26,118 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36799-0x1017c40c3ba0000 connected 2024-12-04T06:54:26,121 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:54:26,136 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:54:26,138 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:54:26,141 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36799-0x1017c40c3ba0000, quorum=127.0.0.1:60571, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T06:54:26,141 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0, hbase.cluster.distributed=false 2024-12-04T06:54:26,143 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36799-0x1017c40c3ba0000, quorum=127.0.0.1:60571, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T06:54:26,147 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36799 2024-12-04T06:54:26,147 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36799 2024-12-04T06:54:26,147 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36799 2024-12-04T06:54:26,149 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36799 2024-12-04T06:54:26,149 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36799 2024-12-04T06:54:26,167 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/607fd5c6574c:0 server-side Connection retries=45 2024-12-04T06:54:26,167 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T06:54:26,167 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T06:54:26,167 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T06:54:26,167 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T06:54:26,167 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T06:54:26,167 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-04T06:54:26,167 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 
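Both the master (port 36799) and the regionserver below register with the MiniZooKeeperCluster on clientPort=60571, and that same quorum address is how a client reaches the test cluster. A minimal, hypothetical client sketch using the standard HBase client API; the table name is invented for illustration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch: connect to the test cluster through its ZooKeeper ensemble and write one cell.
    public class MiniClusterClientSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "60571"); // client port logged above

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestLogRolling"))) {
          Put put = new Put(Bytes.toBytes("row1"));
          put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("v"));
          table.put(put);
        }
      }
    }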
2024-12-04T06:54:26,168 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:32897 2024-12-04T06:54:26,169 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:32897 connecting to ZooKeeper ensemble=127.0.0.1:60571 2024-12-04T06:54:26,170 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:54:26,171 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:54:26,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:328970x0, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T06:54:26,176 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:32897-0x1017c40c3ba0001 connected 2024-12-04T06:54:26,176 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32897-0x1017c40c3ba0001, quorum=127.0.0.1:60571, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T06:54:26,176 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-04T06:54:26,177 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-04T06:54:26,178 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32897-0x1017c40c3ba0001, quorum=127.0.0.1:60571, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-04T06:54:26,179 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32897-0x1017c40c3ba0001, quorum=127.0.0.1:60571, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T06:54:26,180 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32897 2024-12-04T06:54:26,180 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32897 2024-12-04T06:54:26,180 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32897 2024-12-04T06:54:26,181 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32897 2024-12-04T06:54:26,181 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32897 2024-12-04T06:54:26,193 DEBUG [M:0;607fd5c6574c:36799 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;607fd5c6574c:36799 2024-12-04T06:54:26,193 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/607fd5c6574c,36799,1733295266107 2024-12-04T06:54:26,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36799-0x1017c40c3ba0000, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T06:54:26,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:32897-0x1017c40c3ba0001, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T06:54:26,196 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36799-0x1017c40c3ba0000, quorum=127.0.0.1:60571, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/607fd5c6574c,36799,1733295266107 2024-12-04T06:54:26,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32897-0x1017c40c3ba0001, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-04T06:54:26,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36799-0x1017c40c3ba0000, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:54:26,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32897-0x1017c40c3ba0001, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:54:26,198 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36799-0x1017c40c3ba0000, quorum=127.0.0.1:60571, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-04T06:54:26,198 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/607fd5c6574c,36799,1733295266107 from backup master directory 2024-12-04T06:54:26,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36799-0x1017c40c3ba0000, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/607fd5c6574c,36799,1733295266107 2024-12-04T06:54:26,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36799-0x1017c40c3ba0000, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T06:54:26,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32897-0x1017c40c3ba0001, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T06:54:26,201 WARN [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
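Several of the watcher lines above hinge on a ZooKeeper detail that is easy to miss: calling exists() with a watch on a znode that does not exist yet is legal, and the watch later fires as a NodeCreated event, which is exactly the sequence of "Set watcher on znode that does not yet exist, /hbase/master" followed by the NodeCreated notification. A small illustration with the plain ZooKeeper client is below, reusing the quorum address from the log; the watcher body and class name are illustrative only.

```java
import org.apache.zookeeper.ZooKeeper;

public class MasterZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    // Quorum address taken from the log above; the watcher body is illustrative only.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:60571", 30_000, event ->
        System.out.println("event=" + event.getType() + " path=" + event.getPath()));

    // exists() may register a watch on a znode that is not there yet;
    // the watch then fires once with NodeCreated when /hbase/master appears.
    if (zk.exists("/hbase/master", true) == null) {
      System.out.println("/hbase/master not present yet; watch registered");
    }
    zk.close();
  }
}
```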
2024-12-04T06:54:26,201 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=607fd5c6574c,36799,1733295266107 2024-12-04T06:54:26,209 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/hbase.id] with ID: a7b2c3f9-bf06-405b-95ef-f3a14a214150 2024-12-04T06:54:26,209 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/.tmp/hbase.id 2024-12-04T06:54:26,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741826_1002 (size=42) 2024-12-04T06:54:26,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741826_1002 (size=42) 2024-12-04T06:54:26,219 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/.tmp/hbase.id]:[hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/hbase.id] 2024-12-04T06:54:26,232 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:54:26,233 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-04T06:54:26,234 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
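The cluster ID bootstrap above follows a common HDFS idiom: write the file under a temporary location, then rename it into place so readers observe either the complete file or nothing. A rough equivalent with the Hadoop FileSystem API is sketched below; the paths and class name are placeholders, not the test's actual directories.

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WriteThenRenameSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder paths; the test writes under its own test-data directory instead.
    Path tmp = new Path("hdfs://localhost:41769/tmp/hbase.id.tmp");
    Path dst = new Path("hdfs://localhost:41769/tmp/hbase.id");

    FileSystem fs = FileSystem.get(dst.toUri(), conf);
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("a7b2c3f9-bf06-405b-95ef-f3a14a214150".getBytes(StandardCharsets.UTF_8));
    }
    // A rename within one HDFS namespace is atomic, so readers never see a partial file.
    if (!fs.rename(tmp, dst)) {
      throw new IOException("rename failed: " + tmp + " -> " + dst);
    }
  }
}
```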
2024-12-04T06:54:26,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36799-0x1017c40c3ba0000, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:54:26,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32897-0x1017c40c3ba0001, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:54:26,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741827_1003 (size=196) 2024-12-04T06:54:26,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741827_1003 (size=196) 2024-12-04T06:54:26,248 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T06:54:26,249 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-04T06:54:26,249 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T06:54:26,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741828_1004 (size=1189) 2024-12-04T06:54:26,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741828_1004 (size=1189) 2024-12-04T06:54:26,258 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/MasterData/data/master/store 2024-12-04T06:54:26,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741829_1005 (size=34) 2024-12-04T06:54:26,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741829_1005 (size=34) 2024-12-04T06:54:26,265 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:54:26,265 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-04T06:54:26,265 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:54:26,265 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:54:26,265 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-04T06:54:26,265 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:54:26,265 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
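The master:store descriptor logged above (an 'info' family with three versions, in-memory blocks, a ROWCOL bloom filter, ROW_INDEX_V1 encoding and 8 KB blocks, plus single-version 'proc', 'rs' and 'state' families) maps onto the public descriptor builders as sketched below. This is only the client-API shape of that schema, not the internal MasterRegion code path that actually creates the region, and the class name is hypothetical.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreSchemaSketch {
  public static void main(String[] args) {
    // 'info': 3 versions, in-memory, ROWCOL bloom filter, ROW_INDEX_V1 encoding, 8 KB blocks.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8 * 1024)
        .build();

    // 'proc', 'rs' and 'state' keep the defaults shown in the log: 1 version, 64 KB blocks.
    TableDescriptorBuilder builder = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info);
    for (String family : new String[] {"proc", "rs", "state"}) {
      builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
    }
    TableDescriptor store = builder.build();
    System.out.println(store);
  }
}
```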
2024-12-04T06:54:26,265 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733295266265Disabling compacts and flushes for region at 1733295266265Disabling writes for close at 1733295266265Writing region close event to WAL at 1733295266265Closed at 1733295266265 2024-12-04T06:54:26,266 WARN [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/MasterData/data/master/store/.initializing 2024-12-04T06:54:26,266 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/MasterData/WALs/607fd5c6574c,36799,1733295266107 2024-12-04T06:54:26,269 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=607fd5c6574c%2C36799%2C1733295266107, suffix=, logDir=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/MasterData/WALs/607fd5c6574c,36799,1733295266107, archiveDir=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/MasterData/oldWALs, maxLogs=10 2024-12-04T06:54:26,269 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C36799%2C1733295266107.1733295266269 2024-12-04T06:54:26,277 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/MasterData/WALs/607fd5c6574c,36799,1733295266107/607fd5c6574c%2C36799%2C1733295266107.1733295266269 2024-12-04T06:54:26,279 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37635:37635),(127.0.0.1/127.0.0.1:46187:46187)] 2024-12-04T06:54:26,284 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-04T06:54:26,285 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:54:26,285 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:54:26,285 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:54:26,286 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:54:26,287 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-04T06:54:26,287 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:54:26,288 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:54:26,288 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:54:26,289 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-04T06:54:26,289 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:54:26,289 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T06:54:26,289 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:54:26,290 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-04T06:54:26,290 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:54:26,291 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T06:54:26,291 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:54:26,292 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-04T06:54:26,292 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:54:26,293 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T06:54:26,293 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:54:26,293 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:54:26,294 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:54:26,295 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:54:26,295 DEBUG [master/607fd5c6574c:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:54:26,296 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-04T06:54:26,297 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:54:26,299 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T06:54:26,300 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=751154, jitterRate=-0.04485918581485748}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-04T06:54:26,301 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733295266285Initializing all the Stores at 1733295266286 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295266286Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295266286Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295266286Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295266286Cleaning up temporary data from old regions at 1733295266295 (+9 ms)Region opened successfully at 1733295266301 (+6 ms) 2024-12-04T06:54:26,301 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-04T06:54:26,304 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46b98b1d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=607fd5c6574c/172.17.0.2:0 2024-12-04T06:54:26,305 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-04T06:54:26,305 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-04T06:54:26,305 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-04T06:54:26,305 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-04T06:54:26,306 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-04T06:54:26,306 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-04T06:54:26,306 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-04T06:54:26,308 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-04T06:54:26,309 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36799-0x1017c40c3ba0000, quorum=127.0.0.1:60571, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-04T06:54:26,311 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-04T06:54:26,311 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-04T06:54:26,312 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36799-0x1017c40c3ba0000, quorum=127.0.0.1:60571, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-04T06:54:26,313 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-04T06:54:26,314 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-04T06:54:26,314 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36799-0x1017c40c3ba0000, quorum=127.0.0.1:60571, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-04T06:54:26,318 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-04T06:54:26,318 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36799-0x1017c40c3ba0000, quorum=127.0.0.1:60571, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-04T06:54:26,320 DEBUG 
[master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-04T06:54:26,322 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36799-0x1017c40c3ba0000, quorum=127.0.0.1:60571, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-04T06:54:26,323 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-04T06:54:26,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36799-0x1017c40c3ba0000, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T06:54:26,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32897-0x1017c40c3ba0001, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T06:54:26,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36799-0x1017c40c3ba0000, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:54:26,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32897-0x1017c40c3ba0001, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:54:26,325 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=607fd5c6574c,36799,1733295266107, sessionid=0x1017c40c3ba0000, setting cluster-up flag (Was=false) 2024-12-04T06:54:26,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36799-0x1017c40c3ba0000, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:54:26,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32897-0x1017c40c3ba0001, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:54:26,335 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-04T06:54:26,336 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=607fd5c6574c,36799,1733295266107 2024-12-04T06:54:26,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36799-0x1017c40c3ba0000, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:54:26,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32897-0x1017c40c3ba0001, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:54:26,346 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-04T06:54:26,347 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=607fd5c6574c,36799,1733295266107
2024-12-04T06:54:26,349 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again
2024-12-04T06:54:26,350 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta
2024-12-04T06:54:26,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-12-04T06:54:26,351 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-04T06:54:26,351 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-04T06:54:26,351 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 607fd5c6574c,36799,1733295266107 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-04T06:54:26,353 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/607fd5c6574c:0, corePoolSize=5, maxPoolSize=5 2024-12-04T06:54:26,353 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/607fd5c6574c:0, corePoolSize=5, maxPoolSize=5 2024-12-04T06:54:26,353 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/607fd5c6574c:0, corePoolSize=5, maxPoolSize=5 2024-12-04T06:54:26,353 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/607fd5c6574c:0, corePoolSize=5, maxPoolSize=5 2024-12-04T06:54:26,353 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/607fd5c6574c:0, corePoolSize=10, maxPoolSize=10 2024-12-04T06:54:26,353 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:54:26,353 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/607fd5c6574c:0, corePoolSize=2, maxPoolSize=2 2024-12-04T06:54:26,353 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:54:26,357 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733295296357 2024-12-04T06:54:26,357 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-04T06:54:26,357 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-04T06:54:26,357 INFO 
[master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-04T06:54:26,357 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-04T06:54:26,357 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-04T06:54:26,358 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-04T06:54:26,358 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-04T06:54:26,358 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-04T06:54:26,358 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-04T06:54:26,358 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-04T06:54:26,358 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T06:54:26,359 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-04T06:54:26,359 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-04T06:54:26,359 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-04T06:54:26,359 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.large.0-1733295266359,5,FailOnTimeoutGroup] 2024-12-04T06:54:26,359 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.small.0-1733295266359,5,FailOnTimeoutGroup] 2024-12-04T06:54:26,359 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-04T06:54:26,360 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-04T06:54:26,360 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-04T06:54:26,360 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
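The cleaner chores registered above run on fixed periods (LogsCleaner and HFileCleaner every 600000 ms, ReplicationBarrierCleaner every 43200000 ms), and the HMaster line notes that reopening regions with a high store file reference count stays disabled while its threshold is <= 0. The sketch below uses the property names these chores are commonly tuned through; treat the names and values as assumptions to be checked against hbase-default.xml rather than a definitive list.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CleanerChoreConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Period behind "ScheduledChore name=LogsCleaner, period=600000" (10 minutes);
    // the HFileCleaner chore is scheduled on the same interval.
    conf.setInt("hbase.master.cleaner.interval", 600_000);
    // Minimum age before archived WALs / HFiles become eligible for deletion.
    conf.setLong("hbase.master.logcleaner.ttl", 600_000L);
    conf.setLong("hbase.master.hfilecleaner.ttl", 300_000L);
    // Left <= 0 here, which is why the log reports the high-ref-count reopen feature disabled.
    conf.setInt("hbase.regions.recovery.store.file.ref.count", -1);
    System.out.println(conf.get("hbase.master.cleaner.interval"));
  }
}
```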
2024-12-04T06:54:26,360 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:54:26,360 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-04T06:54:26,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741831_1007 (size=1321) 2024-12-04T06:54:26,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741831_1007 (size=1321) 2024-12-04T06:54:26,368 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-04T06:54:26,368 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0 2024-12-04T06:54:26,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741832_1008 (size=32) 2024-12-04T06:54:26,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741832_1008 (size=32) 2024-12-04T06:54:26,380 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:54:26,381 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T06:54:26,382 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T06:54:26,383 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:54:26,383 INFO [RS:0;607fd5c6574c:32897 {}] regionserver.HRegionServer(746): ClusterId : a7b2c3f9-bf06-405b-95ef-f3a14a214150 2024-12-04T06:54:26,383 DEBUG [RS:0;607fd5c6574c:32897 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-04T06:54:26,383 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:54:26,383 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-04T06:54:26,384 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-04T06:54:26,384 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:54:26,385 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:54:26,385 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T06:54:26,385 DEBUG [RS:0;607fd5c6574c:32897 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-04T06:54:26,385 DEBUG [RS:0;607fd5c6574c:32897 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-04T06:54:26,386 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T06:54:26,386 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:54:26,386 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:54:26,387 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T06:54:26,387 DEBUG [RS:0;607fd5c6574c:32897 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-04T06:54:26,388 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 
MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-04T06:54:26,388 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:54:26,388 DEBUG [RS:0;607fd5c6574c:32897 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ebb6396, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=607fd5c6574c/172.17.0.2:0 2024-12-04T06:54:26,388 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:54:26,388 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-04T06:54:26,389 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/hbase/meta/1588230740 2024-12-04T06:54:26,389 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/hbase/meta/1588230740 2024-12-04T06:54:26,391 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-04T06:54:26,391 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-04T06:54:26,392 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
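The FlushLargeStoresPolicy fallback above is just the memstore flush size divided by the number of column families. For master:store that works out to 134217728 / 4 families = 33554432 bytes, the "32.0 M" and flushSizeLowerBound=33554432 reported earlier; hbase:meta's four families land at 16777216 bytes ("16.0 M"), which would imply a 64 MB flush size for the meta region, though that value is not printed in this excerpt.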
2024-12-04T06:54:26,393 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-04T06:54:26,397 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T06:54:26,398 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=747166, jitterRate=-0.049930036067962646}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T06:54:26,399 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733295266380Initializing all the Stores at 1733295266381 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295266381Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295266381Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295266381Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295266381Cleaning up temporary data from old regions at 1733295266391 (+10 ms)Region opened successfully at 1733295266399 (+8 ms) 2024-12-04T06:54:26,399 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-04T06:54:26,399 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-04T06:54:26,399 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-04T06:54:26,399 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-04T06:54:26,399 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-04T06:54:26,400 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-04T06:54:26,400 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733295266399Disabling compacts and flushes for region at 1733295266399Disabling writes for close at 1733295266399Writing region 
close event to WAL at 1733295266400 (+1 ms)Closed at 1733295266400 2024-12-04T06:54:26,402 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T06:54:26,402 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-04T06:54:26,402 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-04T06:54:26,403 DEBUG [RS:0;607fd5c6574c:32897 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;607fd5c6574c:32897 2024-12-04T06:54:26,403 INFO [RS:0;607fd5c6574c:32897 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-04T06:54:26,403 INFO [RS:0;607fd5c6574c:32897 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-04T06:54:26,403 DEBUG [RS:0;607fd5c6574c:32897 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-04T06:54:26,403 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-04T06:54:26,404 INFO [RS:0;607fd5c6574c:32897 {}] regionserver.HRegionServer(2659): reportForDuty to master=607fd5c6574c,36799,1733295266107 with port=32897, startcode=1733295266166 2024-12-04T06:54:26,404 DEBUG [RS:0;607fd5c6574c:32897 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-04T06:54:26,404 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-04T06:54:26,407 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37745, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-12-04T06:54:26,408 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36799 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 607fd5c6574c,32897,1733295266166 2024-12-04T06:54:26,408 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36799 {}] master.ServerManager(517): Registering regionserver=607fd5c6574c,32897,1733295266166 2024-12-04T06:54:26,410 DEBUG [RS:0;607fd5c6574c:32897 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0 2024-12-04T06:54:26,410 DEBUG [RS:0;607fd5c6574c:32897 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41769 2024-12-04T06:54:26,410 DEBUG [RS:0;607fd5c6574c:32897 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-04T06:54:26,413 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36799-0x1017c40c3ba0000, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T06:54:26,413 DEBUG [RS:0;607fd5c6574c:32897 
{}] zookeeper.ZKUtil(111): regionserver:32897-0x1017c40c3ba0001, quorum=127.0.0.1:60571, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/607fd5c6574c,32897,1733295266166 2024-12-04T06:54:26,414 WARN [RS:0;607fd5c6574c:32897 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-04T06:54:26,414 INFO [RS:0;607fd5c6574c:32897 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T06:54:26,414 DEBUG [RS:0;607fd5c6574c:32897 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/WALs/607fd5c6574c,32897,1733295266166 2024-12-04T06:54:26,414 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [607fd5c6574c,32897,1733295266166] 2024-12-04T06:54:26,418 INFO [RS:0;607fd5c6574c:32897 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-04T06:54:26,420 INFO [RS:0;607fd5c6574c:32897 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-04T06:54:26,421 INFO [RS:0;607fd5c6574c:32897 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-04T06:54:26,421 INFO [RS:0;607fd5c6574c:32897 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T06:54:26,421 INFO [RS:0;607fd5c6574c:32897 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-04T06:54:26,422 INFO [RS:0;607fd5c6574c:32897 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-04T06:54:26,422 INFO [RS:0;607fd5c6574c:32897 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
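Note: the MemStoreFlusher and PressureAwareCompactionThroughputController lines above report a global memstore limit of 880 M with an 836 M low-water mark (836/880 = 0.95) and compaction throughput bounds of 50 MB/s and 100 MB/s with a 60000 ms tuning period. A hedged configuration sketch using the usual keys for these settings; the key names are assumptions not shown in the log, and the values simply restate what the log prints:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreAndCompactionThroughputSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of the heap shared by all memstores; 880 M is consistent with
        // 0.4 of a roughly 2.2 GB test JVM heap.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Low-water mark as a fraction of the limit (0.95 of 880 M gives the 836 M above).
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        // Bounds used by PressureAwareCompactionThroughputController, in bytes per second.
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setInt("hbase.hstore.compaction.throughput.tune.period", 60000);
      }
    }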
2024-12-04T06:54:26,422 DEBUG [RS:0;607fd5c6574c:32897 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:54:26,422 DEBUG [RS:0;607fd5c6574c:32897 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:54:26,422 DEBUG [RS:0;607fd5c6574c:32897 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:54:26,422 DEBUG [RS:0;607fd5c6574c:32897 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:54:26,422 DEBUG [RS:0;607fd5c6574c:32897 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:54:26,422 DEBUG [RS:0;607fd5c6574c:32897 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/607fd5c6574c:0, corePoolSize=2, maxPoolSize=2 2024-12-04T06:54:26,422 DEBUG [RS:0;607fd5c6574c:32897 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:54:26,422 DEBUG [RS:0;607fd5c6574c:32897 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:54:26,422 DEBUG [RS:0;607fd5c6574c:32897 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:54:26,422 DEBUG [RS:0;607fd5c6574c:32897 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:54:26,422 DEBUG [RS:0;607fd5c6574c:32897 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:54:26,422 DEBUG [RS:0;607fd5c6574c:32897 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:54:26,422 DEBUG [RS:0;607fd5c6574c:32897 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/607fd5c6574c:0, corePoolSize=3, maxPoolSize=3 2024-12-04T06:54:26,422 DEBUG [RS:0;607fd5c6574c:32897 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0, corePoolSize=3, maxPoolSize=3 2024-12-04T06:54:26,423 INFO [RS:0;607fd5c6574c:32897 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T06:54:26,423 INFO [RS:0;607fd5c6574c:32897 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T06:54:26,423 INFO [RS:0;607fd5c6574c:32897 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T06:54:26,423 INFO [RS:0;607fd5c6574c:32897 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
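Note: the chores enabled above (CompactionChecker every 1000 ms, MemstoreFlusherChore, ExecutorStatusChore, nonceCleaner, and so on) are all instances of ScheduledChore registered with the region server's ChoreService. A small sketch of that pattern using the internal ScheduledChore and ChoreService classes; the constructor shapes are assumed from recent HBase versions and the chore body is a stand-in:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        // Trivial Stoppable, standing in for the region server instance.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("demo");
        // A chore with a 1000 ms period, like the CompactionChecker above.
        ScheduledChore checker = new ScheduledChore("DemoChecker", stopper, 1000) {
          @Override protected void chore() {
            System.out.println("chore tick");
          }
        };
        service.scheduleChore(checker);
        Thread.sleep(3000);
        service.shutdown();
      }
    }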
2024-12-04T06:54:26,423 INFO [RS:0;607fd5c6574c:32897 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-04T06:54:26,423 INFO [RS:0;607fd5c6574c:32897 {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,32897,1733295266166-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T06:54:26,439 INFO [RS:0;607fd5c6574c:32897 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-04T06:54:26,439 INFO [RS:0;607fd5c6574c:32897 {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,32897,1733295266166-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T06:54:26,439 INFO [RS:0;607fd5c6574c:32897 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T06:54:26,439 INFO [RS:0;607fd5c6574c:32897 {}] regionserver.Replication(171): 607fd5c6574c,32897,1733295266166 started 2024-12-04T06:54:26,456 INFO [RS:0;607fd5c6574c:32897 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T06:54:26,456 INFO [RS:0;607fd5c6574c:32897 {}] regionserver.HRegionServer(1482): Serving as 607fd5c6574c,32897,1733295266166, RpcServer on 607fd5c6574c/172.17.0.2:32897, sessionid=0x1017c40c3ba0001 2024-12-04T06:54:26,456 DEBUG [RS:0;607fd5c6574c:32897 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-04T06:54:26,456 DEBUG [RS:0;607fd5c6574c:32897 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 607fd5c6574c,32897,1733295266166 2024-12-04T06:54:26,456 DEBUG [RS:0;607fd5c6574c:32897 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '607fd5c6574c,32897,1733295266166' 2024-12-04T06:54:26,456 DEBUG [RS:0;607fd5c6574c:32897 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-04T06:54:26,457 DEBUG [RS:0;607fd5c6574c:32897 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-04T06:54:26,457 DEBUG [RS:0;607fd5c6574c:32897 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-04T06:54:26,457 DEBUG [RS:0;607fd5c6574c:32897 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-04T06:54:26,457 DEBUG [RS:0;607fd5c6574c:32897 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 607fd5c6574c,32897,1733295266166 2024-12-04T06:54:26,457 DEBUG [RS:0;607fd5c6574c:32897 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '607fd5c6574c,32897,1733295266166' 2024-12-04T06:54:26,457 DEBUG [RS:0;607fd5c6574c:32897 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-04T06:54:26,458 DEBUG [RS:0;607fd5c6574c:32897 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-04T06:54:26,458 DEBUG [RS:0;607fd5c6574c:32897 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-04T06:54:26,458 INFO [RS:0;607fd5c6574c:32897 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-04T06:54:26,458 INFO [RS:0;607fd5c6574c:32897 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-12-04T06:54:26,555 WARN [607fd5c6574c:36799 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-04T06:54:26,561 INFO [RS:0;607fd5c6574c:32897 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=607fd5c6574c%2C32897%2C1733295266166, suffix=, logDir=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/WALs/607fd5c6574c,32897,1733295266166, archiveDir=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/oldWALs, maxLogs=32 2024-12-04T06:54:26,561 INFO [RS:0;607fd5c6574c:32897 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C32897%2C1733295266166.1733295266561 2024-12-04T06:54:26,568 INFO [RS:0;607fd5c6574c:32897 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/WALs/607fd5c6574c,32897,1733295266166/607fd5c6574c%2C32897%2C1733295266166.1733295266561 2024-12-04T06:54:26,572 DEBUG [RS:0;607fd5c6574c:32897 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46187:46187),(127.0.0.1/127.0.0.1:37635:37635)] 2024-12-04T06:54:26,805 DEBUG [607fd5c6574c:36799 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-04T06:54:26,806 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=607fd5c6574c,32897,1733295266166 2024-12-04T06:54:26,807 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 607fd5c6574c,32897,1733295266166, state=OPENING 2024-12-04T06:54:26,809 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-04T06:54:26,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36799-0x1017c40c3ba0000, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:54:26,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32897-0x1017c40c3ba0001, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:54:26,811 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-04T06:54:26,811 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=607fd5c6574c,32897,1733295266166}] 2024-12-04T06:54:26,811 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T06:54:26,811 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T06:54:26,964 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-04T06:54:26,967 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55235, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-04T06:54:26,971 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-04T06:54:26,971 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T06:54:26,972 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=607fd5c6574c%2C32897%2C1733295266166.meta, suffix=.meta, logDir=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/WALs/607fd5c6574c,32897,1733295266166, archiveDir=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/oldWALs, maxLogs=32 2024-12-04T06:54:26,973 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C32897%2C1733295266166.meta.1733295266973.meta 2024-12-04T06:54:26,981 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/WALs/607fd5c6574c,32897,1733295266166/607fd5c6574c%2C32897%2C1733295266166.meta.1733295266973.meta 2024-12-04T06:54:26,982 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37635:37635),(127.0.0.1/127.0.0.1:46187:46187)] 2024-12-04T06:54:26,983 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-04T06:54:26,983 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-04T06:54:26,983 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-04T06:54:26,983 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
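Note: both WAL instances above are created with blocksize=256 MB, rollsize=128 MB and maxLogs=32; the roll size is the block size times the log-roll multiplier, 256 MB * 0.5 = 128 MB. A sketch of the configuration keys that would typically drive these values; the key names are assumptions, only the numbers come from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // WAL block size; if unset it is derived from the underlying filesystem block size.
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        // Roll when the WAL reaches blocksize * multiplier (256 MB * 0.5 = 128 MB).
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // Upper bound on the number of un-archived WAL files per region server.
        conf.setInt("hbase.regionserver.maxlogs", 32);
      }
    }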
2024-12-04T06:54:26,983 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-04T06:54:26,983 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:54:26,983 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-04T06:54:26,983 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-04T06:54:26,985 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T06:54:26,985 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T06:54:26,986 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:54:26,986 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:54:26,986 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-04T06:54:26,987 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-04T06:54:26,987 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:54:26,987 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:54:26,987 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T06:54:26,987 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T06:54:26,987 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:54:26,988 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:54:26,988 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T06:54:26,988 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-04T06:54:26,988 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:54:26,989 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
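Note: the CompactionConfiguration line is printed once per store and is identical for the four hbase:meta families: minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0, minCompactSize 128 MB, and a throttle point of 2684354560 bytes. That throttle point matches the default of 2 * maxFilesToCompact * memstore flush size, i.e. 2 * 10 * 128 MB = 2684354560. A sketch of the corresponding ratio-based compaction keys; the key names are the standard ones and are not themselves shown in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);           // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);          // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);    // ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
        // Compactions larger than this go to the large-compaction thread pool.
        conf.setLong("hbase.regionserver.thread.compaction.throttle",
            2L * 10 * 128 * 1024 * 1024);                        // 2684354560
      }
    }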
2024-12-04T06:54:26,989 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-04T06:54:26,990 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/hbase/meta/1588230740 2024-12-04T06:54:26,990 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/hbase/meta/1588230740 2024-12-04T06:54:26,992 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-04T06:54:26,992 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-04T06:54:26,993 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-04T06:54:26,994 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-04T06:54:26,995 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=710938, jitterRate=-0.09599561989307404}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T06:54:26,995 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-04T06:54:26,995 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733295266984Writing region info on filesystem at 1733295266984Initializing all the Stores at 1733295266984Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295266984Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295266985 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295266985Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295266985Cleaning up temporary data from old regions at 1733295266992 (+7 ms)Running coprocessor post-open hooks at 1733295266995 (+3 ms)Region opened successfully at 1733295266995 2024-12-04T06:54:26,997 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733295266964 2024-12-04T06:54:26,999 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-04T06:54:26,999 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-04T06:54:27,000 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=607fd5c6574c,32897,1733295266166 2024-12-04T06:54:27,001 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 607fd5c6574c,32897,1733295266166, state=OPEN 2024-12-04T06:54:27,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36799-0x1017c40c3ba0000, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T06:54:27,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32897-0x1017c40c3ba0001, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T06:54:27,008 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=607fd5c6574c,32897,1733295266166 2024-12-04T06:54:27,008 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T06:54:27,008 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T06:54:27,010 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-04T06:54:27,011 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=607fd5c6574c,32897,1733295266166 in 197 msec 2024-12-04T06:54:27,013 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-04T06:54:27,013 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 609 msec 2024-12-04T06:54:27,014 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T06:54:27,014 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-04T06:54:27,015 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-04T06:54:27,015 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=607fd5c6574c,32897,1733295266166, seqNum=-1] 2024-12-04T06:54:27,015 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T06:54:27,016 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54859, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T06:54:27,021 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 670 msec 2024-12-04T06:54:27,021 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733295267021, completionTime=-1 2024-12-04T06:54:27,021 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-04T06:54:27,021 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-04T06:54:27,023 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-04T06:54:27,024 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733295327024 2024-12-04T06:54:27,024 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733295387024 2024-12-04T06:54:27,024 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-04T06:54:27,024 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,36799,1733295266107-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T06:54:27,024 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,36799,1733295266107-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T06:54:27,024 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,36799,1733295266107-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T06:54:27,024 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-607fd5c6574c:36799, period=300000, unit=MILLISECONDS is enabled. 
2024-12-04T06:54:27,024 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-04T06:54:27,025 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-04T06:54:27,026 DEBUG [master/607fd5c6574c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-04T06:54:27,029 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.828sec 2024-12-04T06:54:27,029 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-04T06:54:27,029 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-04T06:54:27,029 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-04T06:54:27,029 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-04T06:54:27,029 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-04T06:54:27,029 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,36799,1733295266107-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T06:54:27,029 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,36799,1733295266107-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-04T06:54:27,032 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-04T06:54:27,032 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-04T06:54:27,032 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,36799,1733295266107-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-04T06:54:27,084 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ab86f9f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T06:54:27,084 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 607fd5c6574c,36799,-1 for getting cluster id 2024-12-04T06:54:27,085 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-04T06:54:27,086 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a7b2c3f9-bf06-405b-95ef-f3a14a214150' 2024-12-04T06:54:27,086 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-04T06:54:27,086 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a7b2c3f9-bf06-405b-95ef-f3a14a214150" 2024-12-04T06:54:27,087 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@cd00e9f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T06:54:27,087 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [607fd5c6574c,36799,-1] 2024-12-04T06:54:27,087 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-04T06:54:27,087 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:54:27,088 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52802, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-04T06:54:27,089 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4437c7ec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T06:54:27,090 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-04T06:54:27,091 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=607fd5c6574c,32897,1733295266166, seqNum=-1] 2024-12-04T06:54:27,091 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T06:54:27,092 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43836, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T06:54:27,093 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=607fd5c6574c,36799,1733295266107 2024-12-04T06:54:27,093 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:54:27,096 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-04T06:54:27,097 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-04T06:54:27,098 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 607fd5c6574c,36799,1733295266107 2024-12-04T06:54:27,098 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1504dcab 2024-12-04T06:54:27,098 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-04T06:54:27,099 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52810, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-04T06:54:27,099 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36799 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-04T06:54:27,100 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36799 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-12-04T06:54:27,100 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36799 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T06:54:27,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36799 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-12-04T06:54:27,103 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-04T06:54:27,103 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:54:27,104 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36799 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-12-04T06:54:27,104 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-04T06:54:27,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36799 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-04T06:54:27,115 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741835_1011 (size=381) 2024-12-04T06:54:27,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741835_1011 (size=381) 2024-12-04T06:54:27,120 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 90e74c91b24a6a418dfd5e19734cb3a0, NAME => 'TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0 2024-12-04T06:54:27,121 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:54:27,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741836_1012 (size=64) 2024-12-04T06:54:27,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741836_1012 (size=64) 2024-12-04T06:54:27,131 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:54:27,131 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 90e74c91b24a6a418dfd5e19734cb3a0, disabling compactions & flushes 2024-12-04T06:54:27,131 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0. 2024-12-04T06:54:27,131 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0. 2024-12-04T06:54:27,131 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0. after waiting 0 ms 2024-12-04T06:54:27,131 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0. 2024-12-04T06:54:27,131 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0. 2024-12-04T06:54:27,131 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 90e74c91b24a6a418dfd5e19734cb3a0: Waiting for close lock at 1733295267131Disabling compacts and flushes for region at 1733295267131Disabling writes for close at 1733295267131Writing region close event to WAL at 1733295267131Closed at 1733295267131 2024-12-04T06:54:27,133 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-04T06:54:27,133 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733295267133"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733295267133"}]},"ts":"1733295267133"} 2024-12-04T06:54:27,136 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
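Note: the create-table request logged above builds 'TestLogRolling-testLogRolling' with a single 'info' family (VERSIONS 1, BLOOMFILTER ROW, 64 KB blocks) and triggers two TableDescriptorChecker warnings because MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) are far below normal, presumably to force frequent flushes, log rolls and splits during the test. A client-side sketch of an equivalent request with the public Admin API; the cluster connection settings are assumed to come from the classpath configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestLogRollingTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
                  // Deliberately tiny limits, matching the warnings in the log above.
                  .setMaxFileSize(786432)
                  .setMemStoreFlushSize(8192)
                  .setColumnFamily(ColumnFamilyDescriptorBuilder
                      .newBuilder(Bytes.toBytes("info"))
                      .setMaxVersions(1)
                      .setBloomFilterType(BloomType.ROW)
                      .setBlocksize(64 * 1024)
                      .build());
          admin.createTable(table.build());
        }
      }
    }

The synchronous createTable call corresponds to the CreateTableProcedure (pid=4) tracked in the surrounding log entries.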
2024-12-04T06:54:27,137 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-04T06:54:27,137 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733295267137"}]},"ts":"1733295267137"} 2024-12-04T06:54:27,139 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-12-04T06:54:27,139 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=90e74c91b24a6a418dfd5e19734cb3a0, ASSIGN}] 2024-12-04T06:54:27,141 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=90e74c91b24a6a418dfd5e19734cb3a0, ASSIGN 2024-12-04T06:54:27,142 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=90e74c91b24a6a418dfd5e19734cb3a0, ASSIGN; state=OFFLINE, location=607fd5c6574c,32897,1733295266166; forceNewPlan=false, retain=false 2024-12-04T06:54:27,293 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=90e74c91b24a6a418dfd5e19734cb3a0, regionState=OPENING, regionLocation=607fd5c6574c,32897,1733295266166 2024-12-04T06:54:27,295 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=90e74c91b24a6a418dfd5e19734cb3a0, ASSIGN because future has completed 2024-12-04T06:54:27,296 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 90e74c91b24a6a418dfd5e19734cb3a0, server=607fd5c6574c,32897,1733295266166}] 2024-12-04T06:54:27,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:27,452 INFO [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0. 
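Note: the two WARN stack traces above come from a Close-WAL-Writer thread still trying to recover the lease on WAL files of an earlier minicluster (note the different NameNode, localhost:41097, versus the current localhost:41769); the reflective call into DFSClient#isFileClosed fails with "Filesystem closed" because that client has already been shut down. For reference, a sketch of the plain HDFS calls the utility wraps, against a hypothetical path; the real code apparently reaches isFileClosed through reflection for cross-version compatibility:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical WAL path; the real paths are the ones shown in the stack traces.
        Path wal = new Path("hdfs://localhost:41097/path/to/some.wal");
        FileSystem fs = wal.getFileSystem(conf);
        if (fs instanceof DistributedFileSystem) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          // Ask the NameNode to start lease recovery; returns true once the file is closed.
          boolean recovered = dfs.recoverLease(wal);
          // isFileClosed() is the call the reflective probe in the stack trace ends up in.
          System.out.println("recovered=" + recovered + " closed=" + dfs.isFileClosed(wal));
        }
      }
    }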
2024-12-04T06:54:27,452 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 90e74c91b24a6a418dfd5e19734cb3a0, NAME => 'TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0.', STARTKEY => '', ENDKEY => ''} 2024-12-04T06:54:27,452 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 90e74c91b24a6a418dfd5e19734cb3a0 2024-12-04T06:54:27,452 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:54:27,453 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 90e74c91b24a6a418dfd5e19734cb3a0 2024-12-04T06:54:27,453 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 90e74c91b24a6a418dfd5e19734cb3a0 2024-12-04T06:54:27,454 INFO [StoreOpener-90e74c91b24a6a418dfd5e19734cb3a0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 90e74c91b24a6a418dfd5e19734cb3a0 2024-12-04T06:54:27,455 INFO [StoreOpener-90e74c91b24a6a418dfd5e19734cb3a0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 90e74c91b24a6a418dfd5e19734cb3a0 columnFamilyName info 2024-12-04T06:54:27,455 DEBUG [StoreOpener-90e74c91b24a6a418dfd5e19734cb3a0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:54:27,455 INFO [StoreOpener-90e74c91b24a6a418dfd5e19734cb3a0-1 {}] regionserver.HStore(327): Store=90e74c91b24a6a418dfd5e19734cb3a0/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T06:54:27,456 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 90e74c91b24a6a418dfd5e19734cb3a0 2024-12-04T06:54:27,456 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0 2024-12-04T06:54:27,456 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0 2024-12-04T06:54:27,457 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 90e74c91b24a6a418dfd5e19734cb3a0 2024-12-04T06:54:27,457 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 90e74c91b24a6a418dfd5e19734cb3a0 2024-12-04T06:54:27,458 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 90e74c91b24a6a418dfd5e19734cb3a0 2024-12-04T06:54:27,460 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T06:54:27,460 INFO [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 90e74c91b24a6a418dfd5e19734cb3a0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=768151, jitterRate=-0.023245543241500854}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-04T06:54:27,460 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 90e74c91b24a6a418dfd5e19734cb3a0 2024-12-04T06:54:27,461 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 90e74c91b24a6a418dfd5e19734cb3a0: Running coprocessor pre-open hook at 1733295267453Writing region info on filesystem at 1733295267453Initializing all the Stores at 1733295267453Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295267453Cleaning up temporary data from old regions at 1733295267457 (+4 ms)Running coprocessor post-open hooks at 1733295267460 (+3 ms)Region opened successfully at 1733295267461 (+1 ms) 2024-12-04T06:54:27,462 INFO [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0., pid=6, masterSystemTime=1733295267448 2024-12-04T06:54:27,464 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0. 
2024-12-04T06:54:27,464 INFO [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0. 2024-12-04T06:54:27,465 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=90e74c91b24a6a418dfd5e19734cb3a0, regionState=OPEN, openSeqNum=2, regionLocation=607fd5c6574c,32897,1733295266166 2024-12-04T06:54:27,467 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 90e74c91b24a6a418dfd5e19734cb3a0, server=607fd5c6574c,32897,1733295266166 because future has completed 2024-12-04T06:54:27,470 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-04T06:54:27,470 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 90e74c91b24a6a418dfd5e19734cb3a0, server=607fd5c6574c,32897,1733295266166 in 172 msec 2024-12-04T06:54:27,472 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-04T06:54:27,472 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=90e74c91b24a6a418dfd5e19734cb3a0, ASSIGN in 331 msec 2024-12-04T06:54:27,473 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-04T06:54:27,474 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733295267473"}]},"ts":"1733295267473"} 2024-12-04T06:54:27,475 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-12-04T06:54:27,476 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-04T06:54:27,478 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 376 msec 2024-12-04T06:54:28,122 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:28,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:29,123 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:29,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:29,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:29,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:29,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:29,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:29,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:29,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:29,628 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:29,628 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:29,643 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:29,643 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:29,644 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:29,644 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:29,644 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:29,644 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:29,647 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:29,647 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:29,647 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:29,649 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:30,123 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:30,154 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-04T06:54:30,155 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:30,155 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:30,155 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:30,156 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:30,156 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:30,156 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:30,157 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:30,157 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:30,179 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:30,180 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:30,180 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:30,180 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:30,181 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:30,181 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:30,184 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:30,185 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:30,185 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:30,189 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:30,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:54:31,124 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:31,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:32,124 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:54:32,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:54:32,418 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-04T06:54:32,418 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-12-04T06:54:32,763 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-04T06:54:32,763 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-04T06:54:32,763 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-04T06:54:33,125 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:54:33,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:34,126 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:34,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:54:35,126 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:35,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:36,127 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:54:36,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:37,127 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:37,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36799 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-04T06:54:37,158 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-12-04T06:54:37,158 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-12-04T06:54:37,162 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-12-04T06:54:37,162 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0. 
2024-12-04T06:54:37,165 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0., hostname=607fd5c6574c,32897,1733295266166, seqNum=2] 2024-12-04T06:54:37,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32897 {}] regionserver.HRegion(8855): Flush requested on 90e74c91b24a6a418dfd5e19734cb3a0 2024-12-04T06:54:37,178 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 90e74c91b24a6a418dfd5e19734cb3a0 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-04T06:54:37,197 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/.tmp/info/eaa2167f1b314780a7dbf8860fe9e934 is 1080, key is row0001/info:/1733295277166/Put/seqid=0 2024-12-04T06:54:37,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741837_1013 (size=12509) 2024-12-04T06:54:37,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741837_1013 (size=12509) 2024-12-04T06:54:37,203 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/.tmp/info/eaa2167f1b314780a7dbf8860fe9e934 2024-12-04T06:54:37,210 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/.tmp/info/eaa2167f1b314780a7dbf8860fe9e934 as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/eaa2167f1b314780a7dbf8860fe9e934 2024-12-04T06:54:37,216 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/eaa2167f1b314780a7dbf8860fe9e934, entries=7, sequenceid=11, filesize=12.2 K 2024-12-04T06:54:37,218 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 90e74c91b24a6a418dfd5e19734cb3a0 in 39ms, sequenceid=11, compaction requested=false 2024-12-04T06:54:37,218 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 90e74c91b24a6a418dfd5e19734cb3a0: 2024-12-04T06:54:37,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32897 {}] regionserver.HRegion(8855): Flush requested on 90e74c91b24a6a418dfd5e19734cb3a0 2024-12-04T06:54:37,219 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 90e74c91b24a6a418dfd5e19734cb3a0 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-12-04T06:54:37,224 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/.tmp/info/00fa7dedc89c4a79bc12573c09802daa is 1080, key is row0008/info:/1733295277179/Put/seqid=0 2024-12-04T06:54:37,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741838_1014 (size=29761) 2024-12-04T06:54:37,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741838_1014 (size=29761) 2024-12-04T06:54:37,230 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/.tmp/info/00fa7dedc89c4a79bc12573c09802daa 2024-12-04T06:54:37,236 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/.tmp/info/00fa7dedc89c4a79bc12573c09802daa as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/00fa7dedc89c4a79bc12573c09802daa 2024-12-04T06:54:37,241 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/00fa7dedc89c4a79bc12573c09802daa, entries=23, sequenceid=37, filesize=29.1 K 2024-12-04T06:54:37,243 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 90e74c91b24a6a418dfd5e19734cb3a0 in 23ms, sequenceid=37, compaction requested=false 2024-12-04T06:54:37,243 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 90e74c91b24a6a418dfd5e19734cb3a0: 2024-12-04T06:54:37,243 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-12-04T06:54:37,243 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T06:54:37,243 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/00fa7dedc89c4a79bc12573c09802daa because midkey is the same as first or last row 2024-12-04T06:54:37,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:38,128 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:38,266 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-04T06:54:38,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:38,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:38,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:38,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:38,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:38,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:38,269 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:38,269 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:38,290 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:38,290 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:38,290 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:38,290 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:38,291 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:38,291 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:38,294 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:38,294 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:38,294 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:38,297 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:38,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:54:39,129 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:54:39,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32897 {}] regionserver.HRegion(8855): Flush requested on 90e74c91b24a6a418dfd5e19734cb3a0 2024-12-04T06:54:39,234 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 90e74c91b24a6a418dfd5e19734cb3a0 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-04T06:54:39,239 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/.tmp/info/cbb9077b21b44d77b0e0a0acac2abc46 is 1080, key is row0031/info:/1733295277220/Put/seqid=0 2024-12-04T06:54:39,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741839_1015 (size=12509) 2024-12-04T06:54:39,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741839_1015 (size=12509) 2024-12-04T06:54:39,257 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/.tmp/info/cbb9077b21b44d77b0e0a0acac2abc46 2024-12-04T06:54:39,265 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/.tmp/info/cbb9077b21b44d77b0e0a0acac2abc46 as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/cbb9077b21b44d77b0e0a0acac2abc46 2024-12-04T06:54:39,271 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/cbb9077b21b44d77b0e0a0acac2abc46, entries=7, sequenceid=47, filesize=12.2 K 2024-12-04T06:54:39,272 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=22.07 KB/22596 for 90e74c91b24a6a418dfd5e19734cb3a0 in 39ms, sequenceid=47, compaction requested=true 2024-12-04T06:54:39,272 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 90e74c91b24a6a418dfd5e19734cb3a0: 2024-12-04T06:54:39,272 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-12-04T06:54:39,272 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T06:54:39,273 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/00fa7dedc89c4a79bc12573c09802daa because midkey is the same as first or last row 2024-12-04T06:54:39,273 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 90e74c91b24a6a418dfd5e19734cb3a0:info, priority=-2147483648, current under compaction store 
size is 1 2024-12-04T06:54:39,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32897 {}] regionserver.HRegion(8855): Flush requested on 90e74c91b24a6a418dfd5e19734cb3a0 2024-12-04T06:54:39,273 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T06:54:39,273 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T06:54:39,273 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 90e74c91b24a6a418dfd5e19734cb3a0 1/1 column families, dataSize=23.12 KB heapSize=25 KB 2024-12-04T06:54:39,275 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T06:54:39,275 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HStore(1541): 90e74c91b24a6a418dfd5e19734cb3a0/info is initiating minor compaction (all files) 2024-12-04T06:54:39,275 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 90e74c91b24a6a418dfd5e19734cb3a0/info in TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0. 2024-12-04T06:54:39,275 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/eaa2167f1b314780a7dbf8860fe9e934, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/00fa7dedc89c4a79bc12573c09802daa, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/cbb9077b21b44d77b0e0a0acac2abc46] into tmpdir=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/.tmp, totalSize=53.5 K 2024-12-04T06:54:39,275 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.Compactor(225): Compacting eaa2167f1b314780a7dbf8860fe9e934, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733295277166 2024-12-04T06:54:39,276 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.Compactor(225): Compacting 00fa7dedc89c4a79bc12573c09802daa, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733295277179 2024-12-04T06:54:39,276 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.Compactor(225): Compacting cbb9077b21b44d77b0e0a0acac2abc46, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733295277220 2024-12-04T06:54:39,278 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/.tmp/info/22a340923e154d1393f4f8062dde7120 is 1080, key is row0038/info:/1733295279235/Put/seqid=0 
2024-12-04T06:54:39,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741840_1016 (size=28684) 2024-12-04T06:54:39,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741840_1016 (size=28684) 2024-12-04T06:54:39,304 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 90e74c91b24a6a418dfd5e19734cb3a0#info#compaction#57 average throughput is 12.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T06:54:39,305 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/.tmp/info/b1e95095dae64b5db938d13d52e1762f is 1080, key is row0001/info:/1733295277166/Put/seqid=0 2024-12-04T06:54:39,305 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=23.12 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/.tmp/info/22a340923e154d1393f4f8062dde7120 2024-12-04T06:54:39,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741841_1017 (size=44978) 2024-12-04T06:54:39,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741841_1017 (size=44978) 2024-12-04T06:54:39,311 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/.tmp/info/22a340923e154d1393f4f8062dde7120 as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/22a340923e154d1393f4f8062dde7120 2024-12-04T06:54:39,317 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/.tmp/info/b1e95095dae64b5db938d13d52e1762f as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/b1e95095dae64b5db938d13d52e1762f 2024-12-04T06:54:39,317 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/22a340923e154d1393f4f8062dde7120, entries=22, sequenceid=72, filesize=28.0 K 2024-12-04T06:54:39,319 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.12 KB/23672, heapSize ~24.98 KB/25584, currentSize=5.25 KB/5380 for 90e74c91b24a6a418dfd5e19734cb3a0 in 46ms, sequenceid=72, compaction requested=false 2024-12-04T06:54:39,319 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
90e74c91b24a6a418dfd5e19734cb3a0: 2024-12-04T06:54:39,319 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=81.5 K, sizeToCheck=16.0 K 2024-12-04T06:54:39,319 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T06:54:39,319 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/00fa7dedc89c4a79bc12573c09802daa because midkey is the same as first or last row 2024-12-04T06:54:39,324 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 90e74c91b24a6a418dfd5e19734cb3a0/info of 90e74c91b24a6a418dfd5e19734cb3a0 into b1e95095dae64b5db938d13d52e1762f(size=43.9 K), total size for store is 71.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T06:54:39,324 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 90e74c91b24a6a418dfd5e19734cb3a0: 2024-12-04T06:54:39,324 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0., storeName=90e74c91b24a6a418dfd5e19734cb3a0/info, priority=13, startTime=1733295279273; duration=0sec 2024-12-04T06:54:39,324 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.9 K, sizeToCheck=16.0 K 2024-12-04T06:54:39,324 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T06:54:39,324 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/b1e95095dae64b5db938d13d52e1762f because midkey is the same as first or last row 2024-12-04T06:54:39,324 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.9 K, sizeToCheck=16.0 K 2024-12-04T06:54:39,324 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T06:54:39,324 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/b1e95095dae64b5db938d13d52e1762f because midkey is the same as first or last row 2024-12-04T06:54:39,324 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.9 K, sizeToCheck=16.0 K 2024-12-04T06:54:39,324 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T06:54:39,324 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/b1e95095dae64b5db938d13d52e1762f because midkey is the same as first or last row 2024-12-04T06:54:39,324 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T06:54:39,325 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 90e74c91b24a6a418dfd5e19734cb3a0:info 2024-12-04T06:54:39,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:40,129 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:40,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:41,130 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:54:41,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32897 {}] regionserver.HRegion(8855): Flush requested on 90e74c91b24a6a418dfd5e19734cb3a0 2024-12-04T06:54:41,288 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 90e74c91b24a6a418dfd5e19734cb3a0 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-04T06:54:41,294 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/.tmp/info/bf46df139b774a868d13fbb3663a62a2 is 1080, key is row0060/info:/1733295279274/Put/seqid=0 2024-12-04T06:54:41,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741842_1018 (size=12509) 2024-12-04T06:54:41,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741842_1018 (size=12509) 2024-12-04T06:54:41,302 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=83 (bloomFilter=true), to=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/.tmp/info/bf46df139b774a868d13fbb3663a62a2 2024-12-04T06:54:41,308 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/.tmp/info/bf46df139b774a868d13fbb3663a62a2 as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/bf46df139b774a868d13fbb3663a62a2 2024-12-04T06:54:41,315 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/bf46df139b774a868d13fbb3663a62a2, entries=7, sequenceid=83, filesize=12.2 K 2024-12-04T06:54:41,317 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=15.76 KB/16140 for 90e74c91b24a6a418dfd5e19734cb3a0 in 29ms, sequenceid=83, compaction requested=true 2024-12-04T06:54:41,317 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 90e74c91b24a6a418dfd5e19734cb3a0: 2024-12-04T06:54:41,317 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=84.2 K, sizeToCheck=16.0 K 2024-12-04T06:54:41,317 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T06:54:41,317 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/b1e95095dae64b5db938d13d52e1762f because midkey is the same as first or last row 2024-12-04T06:54:41,317 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 90e74c91b24a6a418dfd5e19734cb3a0:info, priority=-2147483648, current under compaction store 
size is 1 2024-12-04T06:54:41,317 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T06:54:41,317 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T06:54:41,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32897 {}] regionserver.HRegion(8855): Flush requested on 90e74c91b24a6a418dfd5e19734cb3a0 2024-12-04T06:54:41,319 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 90e74c91b24a6a418dfd5e19734cb3a0 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-12-04T06:54:41,319 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 86171 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T06:54:41,319 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HStore(1541): 90e74c91b24a6a418dfd5e19734cb3a0/info is initiating minor compaction (all files) 2024-12-04T06:54:41,319 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 90e74c91b24a6a418dfd5e19734cb3a0/info in TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0. 2024-12-04T06:54:41,319 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/b1e95095dae64b5db938d13d52e1762f, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/22a340923e154d1393f4f8062dde7120, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/bf46df139b774a868d13fbb3663a62a2] into tmpdir=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/.tmp, totalSize=84.2 K 2024-12-04T06:54:41,319 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.Compactor(225): Compacting b1e95095dae64b5db938d13d52e1762f, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733295277166 2024-12-04T06:54:41,320 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.Compactor(225): Compacting 22a340923e154d1393f4f8062dde7120, keycount=22, bloomtype=ROW, size=28.0 K, encoding=NONE, compression=NONE, seqNum=72, earliestPutTs=1733295279235 2024-12-04T06:54:41,320 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.Compactor(225): Compacting bf46df139b774a868d13fbb3663a62a2, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1733295279274 2024-12-04T06:54:41,326 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/.tmp/info/bfee17580ca44fd38a26bb4f79c8183a is 1080, key is row0067/info:/1733295281290/Put/seqid=0 
2024-12-04T06:54:41,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741843_1019 (size=22222) 2024-12-04T06:54:41,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741843_1019 (size=22222) 2024-12-04T06:54:41,345 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 90e74c91b24a6a418dfd5e19734cb3a0#info#compaction#60 average throughput is 16.93 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T06:54:41,345 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=102 (bloomFilter=true), to=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/.tmp/info/bfee17580ca44fd38a26bb4f79c8183a 2024-12-04T06:54:41,346 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/.tmp/info/333f35176f324900a887efdce337d1f5 is 1080, key is row0001/info:/1733295277166/Put/seqid=0 2024-12-04T06:54:41,349 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32897 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=90e74c91b24a6a418dfd5e19734cb3a0, server=607fd5c6574c,32897,1733295266166 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-04T06:54:41,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32897 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:43836 deadline: 1733295291348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=90e74c91b24a6a418dfd5e19734cb3a0, server=607fd5c6574c,32897,1733295266166 2024-12-04T06:54:41,353 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/.tmp/info/bfee17580ca44fd38a26bb4f79c8183a as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/bfee17580ca44fd38a26bb4f79c8183a 2024-12-04T06:54:41,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:41,360 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/bfee17580ca44fd38a26bb4f79c8183a, entries=16, sequenceid=102, filesize=21.7 K 2024-12-04T06:54:41,361 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=13.66 KB/13988 for 90e74c91b24a6a418dfd5e19734cb3a0 in 42ms, sequenceid=102, compaction requested=false 2024-12-04T06:54:41,361 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 90e74c91b24a6a418dfd5e19734cb3a0: 2024-12-04T06:54:41,361 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=105.9 K, sizeToCheck=16.0 K 2024-12-04T06:54:41,361 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T06:54:41,361 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/b1e95095dae64b5db938d13d52e1762f because midkey is the same as first or last row 2024-12-04T06:54:41,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741844_1020 (size=76455) 2024-12-04T06:54:41,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741844_1020 (size=76455) 2024-12-04T06:54:41,378 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0., hostname=607fd5c6574c,32897,1733295266166, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0., hostname=607fd5c6574c,32897,1733295266166, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=90e74c91b24a6a418dfd5e19734cb3a0, server=607fd5c6574c,32897,1733295266166 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-04T06:54:41,378 DEBUG 
[RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0., hostname=607fd5c6574c,32897,1733295266166, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=90e74c91b24a6a418dfd5e19734cb3a0, server=607fd5c6574c,32897,1733295266166 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-04T06:54:41,379 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0., hostname=607fd5c6574c,32897,1733295266166, seqNum=2 because the exception is null or not the one we care about 2024-12-04T06:54:41,379 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/.tmp/info/333f35176f324900a887efdce337d1f5 as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/333f35176f324900a887efdce337d1f5 2024-12-04T06:54:41,387 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 90e74c91b24a6a418dfd5e19734cb3a0/info of 90e74c91b24a6a418dfd5e19734cb3a0 into 333f35176f324900a887efdce337d1f5(size=74.7 K), total size for store is 96.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
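The RegionTooBusyException entries above come from the region rejecting writes once its memstore exceeds the blocking limit (32.0 K in this test) until the in-flight flush frees space. Here is a hedged Java sketch of that kind of write back-pressure check; the class and exception names are invented for illustration and do not reproduce HRegion.checkResources.

import java.util.concurrent.atomic.AtomicLong;

/** Illustrative back-pressure gate: reject writes while the in-memory buffer
 *  exceeds a blocking threshold, until a flush drains it. */
public final class MemStoreGate {

    /** Hypothetical stand-in for HBase's RegionTooBusyException. */
    public static final class TooBusyException extends RuntimeException {
        TooBusyException(String msg) { super(msg); }
    }

    private final long blockingSizeBytes;
    private final AtomicLong memStoreSize = new AtomicLong();

    public MemStoreGate(long blockingSizeBytes) {
        this.blockingSizeBytes = blockingSizeBytes;
    }

    /** Called before applying a mutation; mirrors the "over memstore limit" rejection in the log. */
    public void checkResources(String regionName) {
        long current = memStoreSize.get();
        if (current > blockingSizeBytes) {
            throw new TooBusyException("Over memstore limit=" + blockingSizeBytes
                + " bytes, regionName=" + regionName + ", current=" + current);
        }
    }

    public void add(long bytes)     { memStoreSize.addAndGet(bytes); }
    public void flushed(long bytes) { memStoreSize.addAndGet(-bytes); }

    public static void main(String[] args) {
        MemStoreGate gate = new MemStoreGate(32 * 1024);   // 32 K, as in the test
        gate.add(40 * 1024);                               // writes outpace the flusher
        try {
            gate.checkResources("90e74c91b24a6a418dfd5e19734cb3a0");
        } catch (TooBusyException e) {
            System.out.println("rejected: " + e.getMessage()); // client sees a retryable error
        }
        gate.flushed(40 * 1024);                                // flush completes
        gate.checkResources("90e74c91b24a6a418dfd5e19734cb3a0"); // write path is open again
    }
}

The client treats the error as transient, which is consistent with AsyncRegionLocatorHelper above deciding not to update the cached region location.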
2024-12-04T06:54:41,387 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 90e74c91b24a6a418dfd5e19734cb3a0: 2024-12-04T06:54:41,387 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0., storeName=90e74c91b24a6a418dfd5e19734cb3a0/info, priority=13, startTime=1733295281317; duration=0sec 2024-12-04T06:54:41,387 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=96.4 K, sizeToCheck=16.0 K 2024-12-04T06:54:41,387 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T06:54:41,387 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=96.4 K, sizeToCheck=16.0 K 2024-12-04T06:54:41,387 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T06:54:41,387 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=96.4 K, sizeToCheck=16.0 K 2024-12-04T06:54:41,387 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T06:54:41,389 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T06:54:41,389 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T06:54:41,389 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 90e74c91b24a6a418dfd5e19734cb3a0:info 2024-12-04T06:54:41,390 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36799 {}] assignment.AssignmentManager(1363): Split request from 607fd5c6574c,32897,1733295266166, parent={ENCODED => 90e74c91b24a6a418dfd5e19734cb3a0, NAME => 'TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-12-04T06:54:41,396 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36799 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=607fd5c6574c,32897,1733295266166 2024-12-04T06:54:41,401 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36799 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=90e74c91b24a6a418dfd5e19734cb3a0, daughterA=4d030d34fff502afc8453579b74c191a, daughterB=23e1e4928f005993e4764c67507acbfd 2024-12-04T06:54:41,403 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=90e74c91b24a6a418dfd5e19734cb3a0, 
daughterA=4d030d34fff502afc8453579b74c191a, daughterB=23e1e4928f005993e4764c67507acbfd 2024-12-04T06:54:41,403 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=90e74c91b24a6a418dfd5e19734cb3a0, daughterA=4d030d34fff502afc8453579b74c191a, daughterB=23e1e4928f005993e4764c67507acbfd 2024-12-04T06:54:41,403 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=90e74c91b24a6a418dfd5e19734cb3a0, daughterA=4d030d34fff502afc8453579b74c191a, daughterB=23e1e4928f005993e4764c67507acbfd 2024-12-04T06:54:41,411 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=90e74c91b24a6a418dfd5e19734cb3a0, UNASSIGN}] 2024-12-04T06:54:41,412 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=90e74c91b24a6a418dfd5e19734cb3a0, UNASSIGN 2024-12-04T06:54:41,414 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=90e74c91b24a6a418dfd5e19734cb3a0, regionState=CLOSING, regionLocation=607fd5c6574c,32897,1733295266166 2024-12-04T06:54:41,416 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=90e74c91b24a6a418dfd5e19734cb3a0, UNASSIGN because future has completed 2024-12-04T06:54:41,417 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-04T06:54:41,417 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 90e74c91b24a6a418dfd5e19734cb3a0, server=607fd5c6574c,32897,1733295266166}] 2024-12-04T06:54:41,576 INFO [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 90e74c91b24a6a418dfd5e19734cb3a0 2024-12-04T06:54:41,576 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-04T06:54:41,577 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 90e74c91b24a6a418dfd5e19734cb3a0, disabling compactions & flushes 2024-12-04T06:54:41,577 INFO [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0. 2024-12-04T06:54:41,577 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0. 
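The split above is triggered by the size checks logged earlier ("Should split because region size is big enough sumSize=96.4 K, sizeToCheck=16.0 K" with regionsWithCommonTable=1), after which the master runs SplitTableRegionProcedure with splitKey=row0062. The following Java sketch shows one plausible shape of that threshold calculation, assuming the commonly described rule that the threshold grows with the cube of the region count and is capped at the configured maximum; the formula and names are illustrative, not a copy of IncreasingToUpperBoundRegionSplitPolicy.

/** Illustrative split-size rule: the threshold starts small so a fresh table splits early,
 *  and grows toward the configured max file size as the server hosts more regions of it. */
public final class SplitThresholdSketch {

    private final long initialSize;        // e.g. twice the memstore flush size (16 K in this test)
    private final long desiredMaxFileSize; // upper bound once the table has many regions

    public SplitThresholdSketch(long initialSize, long desiredMaxFileSize) {
        this.initialSize = initialSize;
        this.desiredMaxFileSize = desiredMaxFileSize;
    }

    /** Threshold assumed to grow with the cube of the region count, capped at the maximum. */
    public long sizeToCheck(int regionsWithCommonTable) {
        if (regionsWithCommonTable <= 0) {
            return desiredMaxFileSize;
        }
        long grown = initialSize
            * regionsWithCommonTable * regionsWithCommonTable * regionsWithCommonTable;
        return Math.min(desiredMaxFileSize, grown);
    }

    public boolean shouldSplit(long largestStoreSize, int regionsWithCommonTable) {
        return largestStoreSize > sizeToCheck(regionsWithCommonTable);
    }

    public static void main(String[] args) {
        SplitThresholdSketch policy = new SplitThresholdSketch(16 * 1024, 768 * 1024);
        // Values loosely mirroring the log: one region of the table, store around 96.4 K.
        System.out.println("sizeToCheck = " + policy.sizeToCheck(1));          // 16384 (16.0 K)
        System.out.println("shouldSplit = " + policy.shouldSplit(98_714L, 1)); // true
    }
}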
2024-12-04T06:54:41,577 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0. after waiting 0 ms 2024-12-04T06:54:41,577 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0. 2024-12-04T06:54:41,577 INFO [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 90e74c91b24a6a418dfd5e19734cb3a0 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-12-04T06:54:41,582 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/.tmp/info/e9edfd86d2eb4196a5625ee5bdbcfc2e is 1080, key is row0083/info:/1733295281320/Put/seqid=0 2024-12-04T06:54:41,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741845_1021 (size=18987) 2024-12-04T06:54:41,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741845_1021 (size=18987) 2024-12-04T06:54:41,589 INFO [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/.tmp/info/e9edfd86d2eb4196a5625ee5bdbcfc2e 2024-12-04T06:54:41,595 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/.tmp/info/e9edfd86d2eb4196a5625ee5bdbcfc2e as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/e9edfd86d2eb4196a5625ee5bdbcfc2e 2024-12-04T06:54:41,600 INFO [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/e9edfd86d2eb4196a5625ee5bdbcfc2e, entries=13, sequenceid=119, filesize=18.5 K 2024-12-04T06:54:41,601 INFO [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=0 B/0 for 90e74c91b24a6a418dfd5e19734cb3a0 in 24ms, sequenceid=119, compaction requested=true 2024-12-04T06:54:41,602 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0.-1 {}] regionserver.HStore(2317): Moving the files 
[hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/eaa2167f1b314780a7dbf8860fe9e934, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/00fa7dedc89c4a79bc12573c09802daa, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/b1e95095dae64b5db938d13d52e1762f, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/cbb9077b21b44d77b0e0a0acac2abc46, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/22a340923e154d1393f4f8062dde7120, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/bf46df139b774a868d13fbb3663a62a2] to archive 2024-12-04T06:54:41,603 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-04T06:54:41,605 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/eaa2167f1b314780a7dbf8860fe9e934 to hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/archive/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/eaa2167f1b314780a7dbf8860fe9e934 2024-12-04T06:54:41,606 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/00fa7dedc89c4a79bc12573c09802daa to hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/archive/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/00fa7dedc89c4a79bc12573c09802daa 2024-12-04T06:54:41,608 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/b1e95095dae64b5db938d13d52e1762f to hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/archive/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/b1e95095dae64b5db938d13d52e1762f 2024-12-04T06:54:41,609 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/cbb9077b21b44d77b0e0a0acac2abc46 to 
hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/archive/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/cbb9077b21b44d77b0e0a0acac2abc46 2024-12-04T06:54:41,610 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/22a340923e154d1393f4f8062dde7120 to hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/archive/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/22a340923e154d1393f4f8062dde7120 2024-12-04T06:54:41,611 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/bf46df139b774a868d13fbb3663a62a2 to hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/archive/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/bf46df139b774a868d13fbb3663a62a2 2024-12-04T06:54:41,617 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/recovered.edits/122.seqid, newMaxSeqId=122, maxSeqId=1 2024-12-04T06:54:41,618 INFO [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0. 2024-12-04T06:54:41,618 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 90e74c91b24a6a418dfd5e19734cb3a0: Waiting for close lock at 1733295281577Running coprocessor pre-close hooks at 1733295281577Disabling compacts and flushes for region at 1733295281577Disabling writes for close at 1733295281577Obtaining lock to block concurrent updates at 1733295281577Preparing flush snapshotting stores in 90e74c91b24a6a418dfd5e19734cb3a0 at 1733295281577Finished memstore snapshotting TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0., syncing WAL and waiting on mvcc, flushsize=dataSize=13988, getHeapSize=15216, getOffHeapSize=0, getCellsCount=13 at 1733295281578 (+1 ms)Flushing stores of TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0. 
at 1733295281578Flushing 90e74c91b24a6a418dfd5e19734cb3a0/info: creating writer at 1733295281579 (+1 ms)Flushing 90e74c91b24a6a418dfd5e19734cb3a0/info: appending metadata at 1733295281582 (+3 ms)Flushing 90e74c91b24a6a418dfd5e19734cb3a0/info: closing flushed file at 1733295281582Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5dd1366: reopening flushed file at 1733295281594 (+12 ms)Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=0 B/0 for 90e74c91b24a6a418dfd5e19734cb3a0 in 24ms, sequenceid=119, compaction requested=true at 1733295281601 (+7 ms)Writing region close event to WAL at 1733295281614 (+13 ms)Running coprocessor post-close hooks at 1733295281618 (+4 ms)Closed at 1733295281618 2024-12-04T06:54:41,621 INFO [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 90e74c91b24a6a418dfd5e19734cb3a0 2024-12-04T06:54:41,621 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=90e74c91b24a6a418dfd5e19734cb3a0, regionState=CLOSED 2024-12-04T06:54:41,624 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 90e74c91b24a6a418dfd5e19734cb3a0, server=607fd5c6574c,32897,1733295266166 because future has completed 2024-12-04T06:54:41,628 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-12-04T06:54:41,628 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 90e74c91b24a6a418dfd5e19734cb3a0, server=607fd5c6574c,32897,1733295266166 in 208 msec 2024-12-04T06:54:41,630 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-04T06:54:41,630 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=90e74c91b24a6a418dfd5e19734cb3a0, UNASSIGN in 217 msec 2024-12-04T06:54:41,639 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:54:41,642 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 3 storefiles, region=90e74c91b24a6a418dfd5e19734cb3a0, threads=3 2024-12-04T06:54:41,645 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/e9edfd86d2eb4196a5625ee5bdbcfc2e for region: 90e74c91b24a6a418dfd5e19734cb3a0 2024-12-04T06:54:41,645 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/333f35176f324900a887efdce337d1f5 for region: 90e74c91b24a6a418dfd5e19734cb3a0 2024-12-04T06:54:41,645 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: 
hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/bfee17580ca44fd38a26bb4f79c8183a for region: 90e74c91b24a6a418dfd5e19734cb3a0 2024-12-04T06:54:41,655 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/bfee17580ca44fd38a26bb4f79c8183a, top=true 2024-12-04T06:54:41,657 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/e9edfd86d2eb4196a5625ee5bdbcfc2e, top=true 2024-12-04T06:54:41,671 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/TestLogRolling-testLogRolling=90e74c91b24a6a418dfd5e19734cb3a0-bfee17580ca44fd38a26bb4f79c8183a for child: 23e1e4928f005993e4764c67507acbfd, parent: 90e74c91b24a6a418dfd5e19734cb3a0 2024-12-04T06:54:41,672 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/bfee17580ca44fd38a26bb4f79c8183a for region: 90e74c91b24a6a418dfd5e19734cb3a0 2024-12-04T06:54:41,673 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/TestLogRolling-testLogRolling=90e74c91b24a6a418dfd5e19734cb3a0-e9edfd86d2eb4196a5625ee5bdbcfc2e for child: 23e1e4928f005993e4764c67507acbfd, parent: 90e74c91b24a6a418dfd5e19734cb3a0 2024-12-04T06:54:41,673 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/e9edfd86d2eb4196a5625ee5bdbcfc2e for region: 90e74c91b24a6a418dfd5e19734cb3a0 2024-12-04T06:54:41,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741846_1022 (size=27) 2024-12-04T06:54:41,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741846_1022 (size=27) 2024-12-04T06:54:41,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741847_1023 (size=27) 2024-12-04T06:54:41,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741847_1023 (size=27) 2024-12-04T06:54:41,688 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: 
hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/333f35176f324900a887efdce337d1f5 for region: 90e74c91b24a6a418dfd5e19734cb3a0 2024-12-04T06:54:41,690 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 90e74c91b24a6a418dfd5e19734cb3a0 Daughter A: [hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/4d030d34fff502afc8453579b74c191a/info/333f35176f324900a887efdce337d1f5.90e74c91b24a6a418dfd5e19734cb3a0] storefiles, Daughter B: [hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/333f35176f324900a887efdce337d1f5.90e74c91b24a6a418dfd5e19734cb3a0, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/TestLogRolling-testLogRolling=90e74c91b24a6a418dfd5e19734cb3a0-bfee17580ca44fd38a26bb4f79c8183a, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/TestLogRolling-testLogRolling=90e74c91b24a6a418dfd5e19734cb3a0-e9edfd86d2eb4196a5625ee5bdbcfc2e] storefiles. 2024-12-04T06:54:41,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741848_1024 (size=71) 2024-12-04T06:54:41,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741848_1024 (size=71) 2024-12-04T06:54:41,700 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:54:41,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741849_1025 (size=71) 2024-12-04T06:54:41,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741849_1025 (size=71) 2024-12-04T06:54:41,714 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:54:41,725 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/4d030d34fff502afc8453579b74c191a/recovered.edits/122.seqid, newMaxSeqId=122, maxSeqId=-1 2024-12-04T06:54:41,727 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/recovered.edits/122.seqid, newMaxSeqId=122, maxSeqId=-1 2024-12-04T06:54:41,730 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733295281730"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1733295281730"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1733295281730"}]},"ts":"1733295281730"} 2024-12-04T06:54:41,730 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733295281396.4d030d34fff502afc8453579b74c191a.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733295281730"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733295281730"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733295281730"}]},"ts":"1733295281730"} 2024-12-04T06:54:41,730 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733295281730"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733295281730"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733295281730"}]},"ts":"1733295281730"} 2024-12-04T06:54:41,752 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4d030d34fff502afc8453579b74c191a, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=23e1e4928f005993e4764c67507acbfd, ASSIGN}] 2024-12-04T06:54:41,753 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4d030d34fff502afc8453579b74c191a, ASSIGN 2024-12-04T06:54:41,753 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=23e1e4928f005993e4764c67507acbfd, ASSIGN 2024-12-04T06:54:41,754 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4d030d34fff502afc8453579b74c191a, ASSIGN; state=SPLITTING_NEW, location=607fd5c6574c,32897,1733295266166; forceNewPlan=false, retain=false 2024-12-04T06:54:41,754 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=23e1e4928f005993e4764c67507acbfd, ASSIGN; state=SPLITTING_NEW, location=607fd5c6574c,32897,1733295266166; forceNewPlan=false, retain=false 2024-12-04T06:54:41,905 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=23e1e4928f005993e4764c67507acbfd, regionState=OPENING, regionLocation=607fd5c6574c,32897,1733295266166 2024-12-04T06:54:41,905 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta 
row=4d030d34fff502afc8453579b74c191a, regionState=OPENING, regionLocation=607fd5c6574c,32897,1733295266166 2024-12-04T06:54:41,907 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=23e1e4928f005993e4764c67507acbfd, ASSIGN because future has completed 2024-12-04T06:54:41,907 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 23e1e4928f005993e4764c67507acbfd, server=607fd5c6574c,32897,1733295266166}] 2024-12-04T06:54:41,908 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4d030d34fff502afc8453579b74c191a, ASSIGN because future has completed 2024-12-04T06:54:41,909 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4d030d34fff502afc8453579b74c191a, server=607fd5c6574c,32897,1733295266166}] 2024-12-04T06:54:42,064 INFO [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733295281396.4d030d34fff502afc8453579b74c191a. 2024-12-04T06:54:42,064 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 4d030d34fff502afc8453579b74c191a, NAME => 'TestLogRolling-testLogRolling,,1733295281396.4d030d34fff502afc8453579b74c191a.', STARTKEY => '', ENDKEY => 'row0062'} 2024-12-04T06:54:42,065 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 4d030d34fff502afc8453579b74c191a 2024-12-04T06:54:42,065 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733295281396.4d030d34fff502afc8453579b74c191a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:54:42,065 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 4d030d34fff502afc8453579b74c191a 2024-12-04T06:54:42,065 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 4d030d34fff502afc8453579b74c191a 2024-12-04T06:54:42,066 INFO [StoreOpener-4d030d34fff502afc8453579b74c191a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 4d030d34fff502afc8453579b74c191a 2024-12-04T06:54:42,067 INFO [StoreOpener-4d030d34fff502afc8453579b74c191a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4d030d34fff502afc8453579b74c191a columnFamilyName info 2024-12-04T06:54:42,067 DEBUG [StoreOpener-4d030d34fff502afc8453579b74c191a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:54:42,078 DEBUG [StoreOpener-4d030d34fff502afc8453579b74c191a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/4d030d34fff502afc8453579b74c191a/info/333f35176f324900a887efdce337d1f5.90e74c91b24a6a418dfd5e19734cb3a0->hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/333f35176f324900a887efdce337d1f5-bottom 2024-12-04T06:54:42,079 INFO [StoreOpener-4d030d34fff502afc8453579b74c191a-1 {}] regionserver.HStore(327): Store=4d030d34fff502afc8453579b74c191a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T06:54:42,079 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 4d030d34fff502afc8453579b74c191a 2024-12-04T06:54:42,080 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/4d030d34fff502afc8453579b74c191a 2024-12-04T06:54:42,081 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/4d030d34fff502afc8453579b74c191a 2024-12-04T06:54:42,082 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 4d030d34fff502afc8453579b74c191a 2024-12-04T06:54:42,082 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 4d030d34fff502afc8453579b74c191a 2024-12-04T06:54:42,083 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 4d030d34fff502afc8453579b74c191a 2024-12-04T06:54:42,084 INFO [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 4d030d34fff502afc8453579b74c191a; next sequenceid=123; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=773653, jitterRate=-0.016249805688858032}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 
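The region just opened reports desiredMaxFileSize=773653 with jitterRate=-0.016249..., while the second daughter (further below) reports 717756 with a different jitterRate: each region perturbs the configured maximum slightly so that regions of the same table do not all decide to split at the same moment. A small Java sketch of that idea follows; the jitter fraction of 0.25 and the configured maximum of roughly 768 KB are guesses that happen to be consistent with the numbers printed in this log, not values read from the test configuration.

import java.util.Random;

/** Illustrative jitter on the split threshold: each region perturbs the configured
 *  maximum file size slightly so regions of the same table don't all split at once. */
public final class JitteredSplitSize {

    /** Applies a symmetric multiplicative jitter of at most +/- (jitterFraction / 2). */
    public static long jitteredMaxFileSize(long configuredMaxBytes, double jitterFraction, Random rng) {
        double jitterRate = (rng.nextDouble() - 0.5) * jitterFraction;
        return configuredMaxBytes + (long) (configuredMaxBytes * jitterRate);
    }

    public static void main(String[] args) {
        long configuredMax = 786_432L; // ~768 KB; an assumption consistent with the logged values
        Random rng = new Random();
        for (int region = 0; region < 3; region++) {
            System.out.println("region " + region + " desiredMaxFileSize = "
                + jitteredMaxFileSize(configuredMax, 0.25, rng));
        }
        // With jitterRate = -0.016249..., 786,432 bytes lands near the 773,653 printed above.
    }
}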
2024-12-04T06:54:42,084 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4d030d34fff502afc8453579b74c191a 2024-12-04T06:54:42,085 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 4d030d34fff502afc8453579b74c191a: Running coprocessor pre-open hook at 1733295282065Writing region info on filesystem at 1733295282065Initializing all the Stores at 1733295282066 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295282066Cleaning up temporary data from old regions at 1733295282082 (+16 ms)Running coprocessor post-open hooks at 1733295282084 (+2 ms)Region opened successfully at 1733295282085 (+1 ms) 2024-12-04T06:54:42,086 INFO [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733295281396.4d030d34fff502afc8453579b74c191a., pid=13, masterSystemTime=1733295282060 2024-12-04T06:54:42,086 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 4d030d34fff502afc8453579b74c191a:info, priority=-2147483648, current under compaction store size is 1 2024-12-04T06:54:42,086 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T06:54:42,086 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-12-04T06:54:42,087 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1733295281396.4d030d34fff502afc8453579b74c191a. 2024-12-04T06:54:42,087 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HStore(1541): 4d030d34fff502afc8453579b74c191a/info is initiating minor compaction (all files) 2024-12-04T06:54:42,087 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4d030d34fff502afc8453579b74c191a/info in TestLogRolling-testLogRolling,,1733295281396.4d030d34fff502afc8453579b74c191a. 
2024-12-04T06:54:42,087 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/4d030d34fff502afc8453579b74c191a/info/333f35176f324900a887efdce337d1f5.90e74c91b24a6a418dfd5e19734cb3a0->hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/333f35176f324900a887efdce337d1f5-bottom] into tmpdir=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/4d030d34fff502afc8453579b74c191a/.tmp, totalSize=74.7 K 2024-12-04T06:54:42,088 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.Compactor(225): Compacting 333f35176f324900a887efdce337d1f5.90e74c91b24a6a418dfd5e19734cb3a0, keycount=33, bloomtype=ROW, size=74.7 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1733295277166 2024-12-04T06:54:42,089 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733295281396.4d030d34fff502afc8453579b74c191a. 2024-12-04T06:54:42,089 INFO [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733295281396.4d030d34fff502afc8453579b74c191a. 2024-12-04T06:54:42,089 INFO [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd. 
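Note how daughter A opens against "333f35176f324900a887efdce337d1f5...-bottom" and daughter B against the "-top" side plus HFileLink files: the split does not copy any data, it only records which half of the parent's sorted key range each daughter may read, and the daughters' first compactions (one is starting above) rewrite the data into their own files. The Java sketch below illustrates that half-view idea over an in-memory sorted map; it is a conceptual analogy, not HBase's Reference/HFileLink implementation.

import java.util.NavigableMap;
import java.util.TreeMap;

/** Illustrative "reference file" idea: a daughter region reads only the half of the
 *  parent's sorted data on its side of the split key, without copying anything. */
public final class HalfReferenceSketch {

    enum Range { BOTTOM, TOP }

    static NavigableMap<String, String> halfView(NavigableMap<String, String> parent,
                                                 String splitKey, Range range) {
        return range == Range.BOTTOM
            ? parent.headMap(splitKey, false)   // keys strictly below the split key
            : parent.tailMap(splitKey, true);   // the split key and everything above it
    }

    public static void main(String[] args) {
        NavigableMap<String, String> parentStore = new TreeMap<>();
        for (int i = 1; i <= 95; i++) {          // hypothetical row count, for illustration only
            parentStore.put(String.format("row%04d", i), "value" + i);
        }
        // Mirrors the log's splitKey=row0062: daughter A sees the bottom half, daughter B the top.
        NavigableMap<String, String> daughterA = halfView(parentStore, "row0062", Range.BOTTOM);
        NavigableMap<String, String> daughterB = halfView(parentStore, "row0062", Range.TOP);
        System.out.println("daughter A rows: " + daughterA.size());
        System.out.println("daughter B rows: " + daughterB.size());
    }
}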
2024-12-04T06:54:42,089 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 23e1e4928f005993e4764c67507acbfd, NAME => 'TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd.', STARTKEY => 'row0062', ENDKEY => ''} 2024-12-04T06:54:42,089 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 23e1e4928f005993e4764c67507acbfd 2024-12-04T06:54:42,089 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:54:42,089 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 23e1e4928f005993e4764c67507acbfd 2024-12-04T06:54:42,089 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 23e1e4928f005993e4764c67507acbfd 2024-12-04T06:54:42,090 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=4d030d34fff502afc8453579b74c191a, regionState=OPEN, openSeqNum=123, regionLocation=607fd5c6574c,32897,1733295266166 2024-12-04T06:54:42,091 INFO [StoreOpener-23e1e4928f005993e4764c67507acbfd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 23e1e4928f005993e4764c67507acbfd 2024-12-04T06:54:42,091 INFO [StoreOpener-23e1e4928f005993e4764c67507acbfd-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 23e1e4928f005993e4764c67507acbfd columnFamilyName info 2024-12-04T06:54:42,091 DEBUG [StoreOpener-23e1e4928f005993e4764c67507acbfd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:54:42,092 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32897 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-12-04T06:54:42,092 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
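The FlushAllLargeStoresPolicy line above explains the flush of hbase:meta that follows: the policy first looks for column families whose memstores exceed a lower bound and flushes only those, and when none qualifies (as here) it falls back to flushing all 4/4 families. A short Java sketch of that selection rule, with invented names and a made-up lower bound, is shown below.

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

/** Illustrative per-family flush selection: flush only the column families whose
 *  memstores exceed a lower bound, and fall back to flushing all of them when
 *  none is individually large enough (the case logged above). */
public final class SelectiveFlushPolicy {

    public static List<String> familiesToFlush(Map<String, Long> memstoreSizeByFamily,
                                               long flushSizeLowerBound) {
        List<String> large = memstoreSizeByFamily.entrySet().stream()
            .filter(e -> e.getValue() >= flushSizeLowerBound)
            .map(Map.Entry::getKey)
            .collect(Collectors.toList());
        // Since none of the CFs is above the size, flush all of them.
        return large.isEmpty() ? List.copyOf(memstoreSizeByFamily.keySet()) : large;
    }

    public static void main(String[] args) {
        // Four small families, loosely like the meta region's 4/4 column families above.
        Map<String, Long> sizes = Map.of("info", 5_100L, "ns", 40L, "rep_barrier", 0L, "table", 90L);
        System.out.println(familiesToFlush(sizes, 16 * 1024)); // none above 16 K -> flush all four
    }
}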
2024-12-04T06:54:42,092 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-12-04T06:54:42,092 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4d030d34fff502afc8453579b74c191a, server=607fd5c6574c,32897,1733295266166 because future has completed 2024-12-04T06:54:42,096 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=10 2024-12-04T06:54:42,097 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 4d030d34fff502afc8453579b74c191a, server=607fd5c6574c,32897,1733295266166 in 185 msec 2024-12-04T06:54:42,098 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4d030d34fff502afc8453579b74c191a, ASSIGN in 345 msec 2024-12-04T06:54:42,107 DEBUG [StoreOpener-23e1e4928f005993e4764c67507acbfd-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/333f35176f324900a887efdce337d1f5.90e74c91b24a6a418dfd5e19734cb3a0->hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/333f35176f324900a887efdce337d1f5-top 2024-12-04T06:54:42,112 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/hbase/meta/1588230740/.tmp/info/0cbd4e0424f940a8972fd2302e7d6e5f is 193, key is TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd./info:regioninfo/1733295281905/Put/seqid=0 2024-12-04T06:54:42,114 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4d030d34fff502afc8453579b74c191a#info#compaction#62 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T06:54:42,114 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/4d030d34fff502afc8453579b74c191a/.tmp/info/da2586526fb241b0b99196b44f6e2595 is 1080, key is row0001/info:/1733295277166/Put/seqid=0 2024-12-04T06:54:42,114 DEBUG [StoreOpener-23e1e4928f005993e4764c67507acbfd-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/TestLogRolling-testLogRolling=90e74c91b24a6a418dfd5e19734cb3a0-bfee17580ca44fd38a26bb4f79c8183a 2024-12-04T06:54:42,119 DEBUG [StoreOpener-23e1e4928f005993e4764c67507acbfd-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/TestLogRolling-testLogRolling=90e74c91b24a6a418dfd5e19734cb3a0-e9edfd86d2eb4196a5625ee5bdbcfc2e 2024-12-04T06:54:42,119 INFO [StoreOpener-23e1e4928f005993e4764c67507acbfd-1 {}] regionserver.HStore(327): Store=23e1e4928f005993e4764c67507acbfd/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T06:54:42,119 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 23e1e4928f005993e4764c67507acbfd 2024-12-04T06:54:42,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741851_1027 (size=70862) 2024-12-04T06:54:42,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741850_1026 (size=9847) 2024-12-04T06:54:42,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741851_1027 (size=70862) 2024-12-04T06:54:42,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741850_1026 (size=9847) 2024-12-04T06:54:42,121 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd 2024-12-04T06:54:42,122 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/hbase/meta/1588230740/.tmp/info/0cbd4e0424f940a8972fd2302e7d6e5f 2024-12-04T06:54:42,122 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd 2024-12-04T06:54:42,123 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): 
stopping wal replay for 23e1e4928f005993e4764c67507acbfd 2024-12-04T06:54:42,123 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 23e1e4928f005993e4764c67507acbfd 2024-12-04T06:54:42,125 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 23e1e4928f005993e4764c67507acbfd 2024-12-04T06:54:42,126 INFO [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 23e1e4928f005993e4764c67507acbfd; next sequenceid=123; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=717756, jitterRate=-0.08732648193836212}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-04T06:54:42,126 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 23e1e4928f005993e4764c67507acbfd 2024-12-04T06:54:42,127 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 23e1e4928f005993e4764c67507acbfd: Running coprocessor pre-open hook at 1733295282089Writing region info on filesystem at 1733295282089Initializing all the Stores at 1733295282090 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295282090Cleaning up temporary data from old regions at 1733295282123 (+33 ms)Running coprocessor post-open hooks at 1733295282126 (+3 ms)Region opened successfully at 1733295282127 (+1 ms) 2024-12-04T06:54:42,128 INFO [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd., pid=12, masterSystemTime=1733295282060 2024-12-04T06:54:42,128 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/4d030d34fff502afc8453579b74c191a/.tmp/info/da2586526fb241b0b99196b44f6e2595 as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/4d030d34fff502afc8453579b74c191a/info/da2586526fb241b0b99196b44f6e2595 2024-12-04T06:54:42,128 DEBUG [RS:0;607fd5c6574c:32897-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T06:54:42,129 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 23e1e4928f005993e4764c67507acbfd:info, priority=-2147483648, current under compaction store size is 2 2024-12-04T06:54:42,129 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T06:54:42,130 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:42,131 INFO [RS:0;607fd5c6574c:32897-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd. 2024-12-04T06:54:42,131 DEBUG [RS:0;607fd5c6574c:32897-longCompactions-0 {}] regionserver.HStore(1541): 23e1e4928f005993e4764c67507acbfd/info is initiating minor compaction (all files) 2024-12-04T06:54:42,131 INFO [RS:0;607fd5c6574c:32897-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 23e1e4928f005993e4764c67507acbfd/info in TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd. 
2024-12-04T06:54:42,132 INFO [RS:0;607fd5c6574c:32897-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/333f35176f324900a887efdce337d1f5.90e74c91b24a6a418dfd5e19734cb3a0->hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/333f35176f324900a887efdce337d1f5-top, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/TestLogRolling-testLogRolling=90e74c91b24a6a418dfd5e19734cb3a0-bfee17580ca44fd38a26bb4f79c8183a, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/TestLogRolling-testLogRolling=90e74c91b24a6a418dfd5e19734cb3a0-e9edfd86d2eb4196a5625ee5bdbcfc2e] into tmpdir=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp, totalSize=114.9 K 2024-12-04T06:54:42,133 DEBUG [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd. 2024-12-04T06:54:42,133 INFO [RS_OPEN_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd. 
2024-12-04T06:54:42,133 DEBUG [RS:0;607fd5c6574c:32897-longCompactions-0 {}] compactions.Compactor(225): Compacting 333f35176f324900a887efdce337d1f5.90e74c91b24a6a418dfd5e19734cb3a0, keycount=33, bloomtype=ROW, size=74.7 K, encoding=NONE, compression=NONE, seqNum=84, earliestPutTs=1733295277166 2024-12-04T06:54:42,134 DEBUG [RS:0;607fd5c6574c:32897-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=90e74c91b24a6a418dfd5e19734cb3a0-bfee17580ca44fd38a26bb4f79c8183a, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1733295281290 2024-12-04T06:54:42,134 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=23e1e4928f005993e4764c67507acbfd, regionState=OPEN, openSeqNum=123, regionLocation=607fd5c6574c,32897,1733295266166 2024-12-04T06:54:42,134 DEBUG [RS:0;607fd5c6574c:32897-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=90e74c91b24a6a418dfd5e19734cb3a0-e9edfd86d2eb4196a5625ee5bdbcfc2e, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733295281320 2024-12-04T06:54:42,137 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 23e1e4928f005993e4764c67507acbfd, server=607fd5c6574c,32897,1733295266166 because future has completed 2024-12-04T06:54:42,138 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 4d030d34fff502afc8453579b74c191a/info of 4d030d34fff502afc8453579b74c191a into da2586526fb241b0b99196b44f6e2595(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T06:54:42,138 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4d030d34fff502afc8453579b74c191a: 2024-12-04T06:54:42,138 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733295281396.4d030d34fff502afc8453579b74c191a., storeName=4d030d34fff502afc8453579b74c191a/info, priority=15, startTime=1733295282086; duration=0sec 2024-12-04T06:54:42,139 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T06:54:42,139 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4d030d34fff502afc8453579b74c191a:info 2024-12-04T06:54:42,142 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-12-04T06:54:42,142 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 23e1e4928f005993e4764c67507acbfd, server=607fd5c6574c,32897,1733295266166 in 232 msec 2024-12-04T06:54:42,145 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-12-04T06:54:42,145 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=23e1e4928f005993e4764c67507acbfd, ASSIGN in 390 msec 2024-12-04T06:54:42,147 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=90e74c91b24a6a418dfd5e19734cb3a0, daughterA=4d030d34fff502afc8453579b74c191a, daughterB=23e1e4928f005993e4764c67507acbfd in 749 msec 2024-12-04T06:54:42,150 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/hbase/meta/1588230740/.tmp/ns/8d9c46f3e31742069c9b08765f050dd0 is 43, key is default/ns:d/1733295267017/Put/seqid=0 2024-12-04T06:54:42,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741852_1028 (size=5153) 2024-12-04T06:54:42,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741852_1028 (size=5153) 2024-12-04T06:54:42,161 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/hbase/meta/1588230740/.tmp/ns/8d9c46f3e31742069c9b08765f050dd0 2024-12-04T06:54:42,171 INFO [RS:0;607fd5c6574c:32897-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 23e1e4928f005993e4764c67507acbfd#info#compaction#65 average throughput is 34.89 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T06:54:42,172 DEBUG [RS:0;607fd5c6574c:32897-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/29fdd3ce3ded42088f02583157bd7844 is 1080, key is row0062/info:/1733295279279/Put/seqid=0 2024-12-04T06:54:42,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741853_1029 (size=41907) 2024-12-04T06:54:42,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741853_1029 (size=41907) 2024-12-04T06:54:42,184 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/hbase/meta/1588230740/.tmp/table/9a82003ecef241b3bc6bf62aaf1154fd is 65, key is TestLogRolling-testLogRolling/table:state/1733295267473/Put/seqid=0 2024-12-04T06:54:42,188 DEBUG [RS:0;607fd5c6574c:32897-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/29fdd3ce3ded42088f02583157bd7844 as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/29fdd3ce3ded42088f02583157bd7844 2024-12-04T06:54:42,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741854_1030 (size=5340) 2024-12-04T06:54:42,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741854_1030 (size=5340) 2024-12-04T06:54:42,190 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/hbase/meta/1588230740/.tmp/table/9a82003ecef241b3bc6bf62aaf1154fd 2024-12-04T06:54:42,195 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/hbase/meta/1588230740/.tmp/info/0cbd4e0424f940a8972fd2302e7d6e5f as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/hbase/meta/1588230740/info/0cbd4e0424f940a8972fd2302e7d6e5f 2024-12-04T06:54:42,196 INFO [RS:0;607fd5c6574c:32897-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 23e1e4928f005993e4764c67507acbfd/info of 23e1e4928f005993e4764c67507acbfd into 29fdd3ce3ded42088f02583157bd7844(size=40.9 K), total size for store is 40.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T06:54:42,196 DEBUG [RS:0;607fd5c6574c:32897-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 23e1e4928f005993e4764c67507acbfd: 2024-12-04T06:54:42,196 INFO [RS:0;607fd5c6574c:32897-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd., storeName=23e1e4928f005993e4764c67507acbfd/info, priority=13, startTime=1733295282128; duration=0sec 2024-12-04T06:54:42,196 DEBUG [RS:0;607fd5c6574c:32897-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T06:54:42,196 DEBUG [RS:0;607fd5c6574c:32897-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 23e1e4928f005993e4764c67507acbfd:info 2024-12-04T06:54:42,200 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/hbase/meta/1588230740/info/0cbd4e0424f940a8972fd2302e7d6e5f, entries=30, sequenceid=17, filesize=9.6 K 2024-12-04T06:54:42,201 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/hbase/meta/1588230740/.tmp/ns/8d9c46f3e31742069c9b08765f050dd0 as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/hbase/meta/1588230740/ns/8d9c46f3e31742069c9b08765f050dd0 2024-12-04T06:54:42,206 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/hbase/meta/1588230740/ns/8d9c46f3e31742069c9b08765f050dd0, entries=2, sequenceid=17, filesize=5.0 K 2024-12-04T06:54:42,207 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/hbase/meta/1588230740/.tmp/table/9a82003ecef241b3bc6bf62aaf1154fd as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/hbase/meta/1588230740/table/9a82003ecef241b3bc6bf62aaf1154fd 2024-12-04T06:54:42,212 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/hbase/meta/1588230740/table/9a82003ecef241b3bc6bf62aaf1154fd, entries=2, sequenceid=17, filesize=5.2 K 2024-12-04T06:54:42,213 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 121ms, sequenceid=17, compaction requested=false 2024-12-04T06:54:42,214 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-04T06:54:42,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:43,131 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:43,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:44,131 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:44,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:45,132 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:45,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:46,133 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:46,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:47,121 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-04T06:54:47,122 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:47,123 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:47,123 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:47,123 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:47,124 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:47,124 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:47,125 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:47,125 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:47,133 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:47,148 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:47,149 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:47,149 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:47,149 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:47,149 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:47,150 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:47,153 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:47,153 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:47,154 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:47,157 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T06:54:47,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:48,134 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:48,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:49,135 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:49,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:50,135 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:50,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:51,136 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:51,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:51,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32897 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:43836 deadline: 1733295301447, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0. is not online on 607fd5c6574c,32897,1733295266166 2024-12-04T06:54:51,449 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0., hostname=607fd5c6574c,32897,1733295266166, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0., hostname=607fd5c6574c,32897,1733295266166, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0. 
is not online on 607fd5c6574c,32897,1733295266166 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-04T06:54:51,449 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0., hostname=607fd5c6574c,32897,1733295266166, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0. is not online on 607fd5c6574c,32897,1733295266166 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-04T06:54:51,449 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1733295267099.90e74c91b24a6a418dfd5e19734cb3a0., hostname=607fd5c6574c,32897,1733295266166, seqNum=2 from cache 2024-12-04T06:54:52,137 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:52,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:54:53,137 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:53,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:54,138 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:54:54,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:55,138 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:55,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:54:56,088 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-04T06:54:56,139 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:56,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:57,140 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:57,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:58,140 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:58,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:59,141 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:54:59,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-04T06:55:00,142 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
[The identical WARN from util.RecoverLeaseFSUtils(258) and InvocationTargetException / "Caused by: java.io.IOException: Filesystem closed" stack trace repeats for the meta WAL (607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta) at 06:55:00,371 and then for both WAL files roughly once per second through 06:55:07,146.]
2024-12-04T06:55:07,317 INFO [master/607fd5c6574c:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-12-04T06:55:07,317 INFO [master/607fd5c6574c:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
[The same WARN and stack trace continue for both WAL files roughly once per second from 06:55:07,375 through 06:55:11,377.]
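The repeated warnings above all have the same shape: RecoverLeaseFSUtils probes DistributedFileSystem.isFileClosed through reflection (note the Method.invoke frames), so once the test has shut the DFSClient down, the real failure ("Filesystem closed") only surfaces as the cause of an InvocationTargetException. The Java sketch below illustrates that call-and-unwrap pattern; the class and method names are hypothetical and this is not the actual HBase implementation.

```java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch only: shows why an IOException such as "Filesystem closed"
// shows up wrapped in an InvocationTargetException when isFileClosed is invoked
// reflectively. Not the HBase source.
public final class IsFileClosedProbe {

  /** Returns true only if the filesystem positively reports the file as closed. */
  static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      // isFileClosed(Path) is specific to DistributedFileSystem, so it is
      // looked up at runtime instead of being called directly.
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, path);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      // This FileSystem does not expose the probe; the caller falls back to retrying.
      return false;
    } catch (InvocationTargetException e) {
      // The reflective wrapper hides the real failure; the actionable detail is
      // the cause, e.g. java.io.IOException: Filesystem closed.
      System.err.println("Failed invocation for " + path + ": " + e.getCause());
      return false;
    }
  }
}
```

When reading traces like the ones above, the "Caused by:" line carries the useful information; the InvocationTargetException itself is only reflection plumbing.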
2024-12-04T06:55:11,613 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0096', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd., hostname=607fd5c6574c,32897,1733295266166, seqNum=123]
2024-12-04T06:55:11,983 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340
[The lease-recovery WARN from util.RecoverLeaseFSUtils(258) and its identical stack trace repeat for both WAL files at 06:55:12,149, 06:55:12,378, 06:55:13,149, and 06:55:13,378.]
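The AsyncNonMetaRegionLocator DEBUG entry above records the client resolving row 'row0096' of TestLogRolling-testLogRolling to region 23e1e4928f005993e4764c67507acbfd on 607fd5c6574c,32897. A minimal sketch of reproducing that lookup with the synchronous client API is below; it assumes a reachable cluster configuration (hbase-site.xml) on the classpath and is not part of the test itself.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: resolve a row key to its region location, the same information the
// AsyncNonMetaRegionLocator DEBUG line above reports during the test.
public final class LocateRow {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("TestLogRolling-testLogRolling"))) {
      // reload=true bypasses the client-side location cache
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("row0096"), true);
      System.out.println(loc.getRegion().getRegionNameAsString() + " on " + loc.getServerName());
    }
  }
}
```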
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:13,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32897 {}] regionserver.HRegion(8855): Flush requested on 23e1e4928f005993e4764c67507acbfd 2024-12-04T06:55:13,627 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 23e1e4928f005993e4764c67507acbfd 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-04T06:55:13,633 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/d9a01860595f4a8295b4886f203ef51b is 1080, key is row0096/info:/1733295311614/Put/seqid=0 2024-12-04T06:55:13,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741855_1031 (size=12514) 2024-12-04T06:55:13,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741855_1031 (size=12514) 2024-12-04T06:55:13,639 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/d9a01860595f4a8295b4886f203ef51b 2024-12-04T06:55:13,666 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/d9a01860595f4a8295b4886f203ef51b as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/d9a01860595f4a8295b4886f203ef51b 2024-12-04T06:55:13,678 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/d9a01860595f4a8295b4886f203ef51b, entries=7, sequenceid=133, filesize=12.2 K 2024-12-04T06:55:13,679 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=16.81 
KB/17216 for 23e1e4928f005993e4764c67507acbfd in 52ms, sequenceid=133, compaction requested=false 2024-12-04T06:55:13,679 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 23e1e4928f005993e4764c67507acbfd: 2024-12-04T06:55:13,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32897 {}] regionserver.HRegion(8855): Flush requested on 23e1e4928f005993e4764c67507acbfd 2024-12-04T06:55:13,681 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 23e1e4928f005993e4764c67507acbfd 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 2024-12-04T06:55:13,686 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/0dc425b96abd42c2beb8e21e084528b0 is 1080, key is row0103/info:/1733295313628/Put/seqid=0 2024-12-04T06:55:13,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741856_1032 (size=24394) 2024-12-04T06:55:13,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741856_1032 (size=24394) 2024-12-04T06:55:13,720 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/0dc425b96abd42c2beb8e21e084528b0 2024-12-04T06:55:13,731 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/0dc425b96abd42c2beb8e21e084528b0 as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/0dc425b96abd42c2beb8e21e084528b0 2024-12-04T06:55:13,739 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/0dc425b96abd42c2beb8e21e084528b0, entries=18, sequenceid=154, filesize=23.8 K 2024-12-04T06:55:13,741 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=8.41 KB/8608 for 23e1e4928f005993e4764c67507acbfd in 60ms, sequenceid=154, compaction requested=true 2024-12-04T06:55:13,741 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 23e1e4928f005993e4764c67507acbfd: 2024-12-04T06:55:13,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 23e1e4928f005993e4764c67507acbfd:info, priority=-2147483648, current under compaction store size is 1 2024-12-04T06:55:13,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T06:55:13,741 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 
blocking 2024-12-04T06:55:13,742 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 78815 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T06:55:13,742 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HStore(1541): 23e1e4928f005993e4764c67507acbfd/info is initiating minor compaction (all files) 2024-12-04T06:55:13,743 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 23e1e4928f005993e4764c67507acbfd/info in TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd. 2024-12-04T06:55:13,743 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/29fdd3ce3ded42088f02583157bd7844, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/d9a01860595f4a8295b4886f203ef51b, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/0dc425b96abd42c2beb8e21e084528b0] into tmpdir=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp, totalSize=77.0 K 2024-12-04T06:55:13,743 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.Compactor(225): Compacting 29fdd3ce3ded42088f02583157bd7844, keycount=34, bloomtype=ROW, size=40.9 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733295279279 2024-12-04T06:55:13,744 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.Compactor(225): Compacting d9a01860595f4a8295b4886f203ef51b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733295311614 2024-12-04T06:55:13,744 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0dc425b96abd42c2beb8e21e084528b0, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1733295313628 2024-12-04T06:55:13,757 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 23e1e4928f005993e4764c67507acbfd#info#compaction#69 average throughput is 30.27 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T06:55:13,758 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/d17978525cde46dbb308aa45fb6a8e8b is 1080, key is row0062/info:/1733295279279/Put/seqid=0 2024-12-04T06:55:13,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741857_1033 (size=69025) 2024-12-04T06:55:13,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741857_1033 (size=69025) 2024-12-04T06:55:13,788 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/d17978525cde46dbb308aa45fb6a8e8b as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/d17978525cde46dbb308aa45fb6a8e8b 2024-12-04T06:55:13,797 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 23e1e4928f005993e4764c67507acbfd/info of 23e1e4928f005993e4764c67507acbfd into d17978525cde46dbb308aa45fb6a8e8b(size=67.4 K), total size for store is 67.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T06:55:13,797 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 23e1e4928f005993e4764c67507acbfd: 2024-12-04T06:55:13,798 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd., storeName=23e1e4928f005993e4764c67507acbfd/info, priority=13, startTime=1733295313741; duration=0sec 2024-12-04T06:55:13,798 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T06:55:13,798 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 23e1e4928f005993e4764c67507acbfd:info 2024-12-04T06:55:14,150 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:14,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:15,150 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:15,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:15,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32897 {}] regionserver.HRegion(8855): Flush requested on 23e1e4928f005993e4764c67507acbfd 2024-12-04T06:55:15,699 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 23e1e4928f005993e4764c67507acbfd 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-12-04T06:55:15,703 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/a7581316a2b747c38fd500061328ed79 is 1080, key is row0121/info:/1733295313682/Put/seqid=0 2024-12-04T06:55:15,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741858_1034 (size=14672) 2024-12-04T06:55:15,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741858_1034 (size=14672) 2024-12-04T06:55:15,711 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/a7581316a2b747c38fd500061328ed79 2024-12-04T06:55:15,718 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/a7581316a2b747c38fd500061328ed79 as 
hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/a7581316a2b747c38fd500061328ed79 2024-12-04T06:55:15,724 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/a7581316a2b747c38fd500061328ed79, entries=9, sequenceid=167, filesize=14.3 K 2024-12-04T06:55:15,725 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=13.66 KB/13988 for 23e1e4928f005993e4764c67507acbfd in 26ms, sequenceid=167, compaction requested=false 2024-12-04T06:55:15,725 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 23e1e4928f005993e4764c67507acbfd: 2024-12-04T06:55:15,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32897 {}] regionserver.HRegion(8855): Flush requested on 23e1e4928f005993e4764c67507acbfd 2024-12-04T06:55:15,726 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 23e1e4928f005993e4764c67507acbfd 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-12-04T06:55:15,730 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/a9182a56cad742b2adaf2e8d7729bb39 is 1080, key is row0130/info:/1733295315700/Put/seqid=0 2024-12-04T06:55:15,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741859_1035 (size=20078) 2024-12-04T06:55:15,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741859_1035 (size=20078) 2024-12-04T06:55:15,737 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/a9182a56cad742b2adaf2e8d7729bb39 2024-12-04T06:55:15,746 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/a9182a56cad742b2adaf2e8d7729bb39 as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/a9182a56cad742b2adaf2e8d7729bb39 2024-12-04T06:55:15,751 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/a9182a56cad742b2adaf2e8d7729bb39, entries=14, sequenceid=184, filesize=19.6 K 2024-12-04T06:55:15,752 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=13.66 KB/13988 for 23e1e4928f005993e4764c67507acbfd in 26ms, sequenceid=184, compaction requested=true 2024-12-04T06:55:15,752 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 23e1e4928f005993e4764c67507acbfd: 2024-12-04T06:55:15,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 23e1e4928f005993e4764c67507acbfd:info, priority=-2147483648, current under compaction store size is 1 2024-12-04T06:55:15,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T06:55:15,752 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T06:55:15,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32897 {}] regionserver.HRegion(8855): Flush requested on 23e1e4928f005993e4764c67507acbfd 2024-12-04T06:55:15,753 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 23e1e4928f005993e4764c67507acbfd 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-12-04T06:55:15,753 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103775 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T06:55:15,753 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HStore(1541): 23e1e4928f005993e4764c67507acbfd/info is initiating minor compaction (all files) 2024-12-04T06:55:15,753 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 23e1e4928f005993e4764c67507acbfd/info in TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd. 2024-12-04T06:55:15,754 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/d17978525cde46dbb308aa45fb6a8e8b, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/a7581316a2b747c38fd500061328ed79, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/a9182a56cad742b2adaf2e8d7729bb39] into tmpdir=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp, totalSize=101.3 K 2024-12-04T06:55:15,754 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.Compactor(225): Compacting d17978525cde46dbb308aa45fb6a8e8b, keycount=59, bloomtype=ROW, size=67.4 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1733295279279 2024-12-04T06:55:15,754 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.Compactor(225): Compacting a7581316a2b747c38fd500061328ed79, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1733295313682 2024-12-04T06:55:15,755 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.Compactor(225): Compacting a9182a56cad742b2adaf2e8d7729bb39, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=184, earliestPutTs=1733295315700 2024-12-04T06:55:15,758 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/cfb69f173cf249e6b5c622b3e888c437 is 1080, key is row0144/info:/1733295315727/Put/seqid=0 2024-12-04T06:55:15,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741860_1036 (size=20078) 2024-12-04T06:55:15,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741860_1036 (size=20078) 2024-12-04T06:55:15,765 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=201 (bloomFilter=true), to=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/cfb69f173cf249e6b5c622b3e888c437 2024-12-04T06:55:15,768 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 23e1e4928f005993e4764c67507acbfd#info#compaction#73 average throughput is 42.07 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T06:55:15,769 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/7c7b1effe410455d881271c02dd02626 is 1080, key is row0062/info:/1733295279279/Put/seqid=0 2024-12-04T06:55:15,771 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/cfb69f173cf249e6b5c622b3e888c437 as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/cfb69f173cf249e6b5c622b3e888c437 2024-12-04T06:55:15,775 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/cfb69f173cf249e6b5c622b3e888c437, entries=14, sequenceid=201, filesize=19.6 K 2024-12-04T06:55:15,777 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=3.15 KB/3228 for 23e1e4928f005993e4764c67507acbfd in 23ms, sequenceid=201, compaction requested=false 2024-12-04T06:55:15,777 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 23e1e4928f005993e4764c67507acbfd: 2024-12-04T06:55:15,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741861_1037 (size=93998) 2024-12-04T06:55:15,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741861_1037 (size=93998) 2024-12-04T06:55:15,786 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/7c7b1effe410455d881271c02dd02626 as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/7c7b1effe410455d881271c02dd02626 2024-12-04T06:55:15,791 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 23e1e4928f005993e4764c67507acbfd/info of 23e1e4928f005993e4764c67507acbfd into 7c7b1effe410455d881271c02dd02626(size=91.8 K), total size for store is 111.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T06:55:15,791 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 23e1e4928f005993e4764c67507acbfd: 2024-12-04T06:55:15,791 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd., storeName=23e1e4928f005993e4764c67507acbfd/info, priority=13, startTime=1733295315752; duration=0sec 2024-12-04T06:55:15,791 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T06:55:15,791 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 23e1e4928f005993e4764c67507acbfd:info 2024-12-04T06:55:16,151 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:16,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:17,152 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:17,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:17,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32897 {}] regionserver.HRegion(8855): Flush requested on 23e1e4928f005993e4764c67507acbfd 2024-12-04T06:55:17,767 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 23e1e4928f005993e4764c67507acbfd 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-04T06:55:17,772 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/d077c5ee51e0401b8045113a17d369a1 is 1080, key is row0158/info:/1733295315754/Put/seqid=0 2024-12-04T06:55:17,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741862_1038 (size=12516) 2024-12-04T06:55:17,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741862_1038 (size=12516) 2024-12-04T06:55:17,785 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/d077c5ee51e0401b8045113a17d369a1 2024-12-04T06:55:17,792 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/d077c5ee51e0401b8045113a17d369a1 as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/d077c5ee51e0401b8045113a17d369a1 2024-12-04T06:55:17,798 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/d077c5ee51e0401b8045113a17d369a1, entries=7, sequenceid=212, filesize=12.2 K 2024-12-04T06:55:17,800 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=18.91 KB/19368 for 23e1e4928f005993e4764c67507acbfd in 32ms, sequenceid=212, compaction requested=true 2024-12-04T06:55:17,800 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 23e1e4928f005993e4764c67507acbfd: 2024-12-04T06:55:17,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 23e1e4928f005993e4764c67507acbfd:info, priority=-2147483648, current under compaction store size is 1 2024-12-04T06:55:17,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T06:55:17,800 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T06:55:17,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32897 {}] regionserver.HRegion(8855): Flush requested on 23e1e4928f005993e4764c67507acbfd 2024-12-04T06:55:17,801 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 23e1e4928f005993e4764c67507acbfd 1/1 column families, dataSize=19.96 KB heapSize=21.63 KB 2024-12-04T06:55:17,801 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 126592 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T06:55:17,801 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HStore(1541): 23e1e4928f005993e4764c67507acbfd/info is initiating minor compaction (all files) 2024-12-04T06:55:17,801 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 23e1e4928f005993e4764c67507acbfd/info in TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd. 2024-12-04T06:55:17,801 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/7c7b1effe410455d881271c02dd02626, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/cfb69f173cf249e6b5c622b3e888c437, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/d077c5ee51e0401b8045113a17d369a1] into tmpdir=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp, totalSize=123.6 K 2024-12-04T06:55:17,802 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7c7b1effe410455d881271c02dd02626, keycount=82, bloomtype=ROW, size=91.8 K, encoding=NONE, compression=NONE, seqNum=184, earliestPutTs=1733295279279 2024-12-04T06:55:17,802 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.Compactor(225): Compacting cfb69f173cf249e6b5c622b3e888c437, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1733295315727 2024-12-04T06:55:17,803 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.Compactor(225): Compacting d077c5ee51e0401b8045113a17d369a1, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1733295315754 2024-12-04T06:55:17,806 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/00c9d3168d8e4bbbadbc35ccf76433e3 is 1080, key is row0165/info:/1733295317768/Put/seqid=0 2024-12-04T06:55:17,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to 
blk_1073741863_1039 (size=25472) 2024-12-04T06:55:17,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741863_1039 (size=25472) 2024-12-04T06:55:17,818 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=19.96 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/00c9d3168d8e4bbbadbc35ccf76433e3 2024-12-04T06:55:17,823 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 23e1e4928f005993e4764c67507acbfd#info#compaction#76 average throughput is 35.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T06:55:17,823 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/b60e38fef86547ed8fb4e8567ceb5549 is 1080, key is row0062/info:/1733295279279/Put/seqid=0 2024-12-04T06:55:17,826 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/00c9d3168d8e4bbbadbc35ccf76433e3 as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/00c9d3168d8e4bbbadbc35ccf76433e3 2024-12-04T06:55:17,832 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/00c9d3168d8e4bbbadbc35ccf76433e3, entries=19, sequenceid=234, filesize=24.9 K 2024-12-04T06:55:17,833 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~19.96 KB/20444, heapSize ~21.61 KB/22128, currentSize=9.46 KB/9684 for 23e1e4928f005993e4764c67507acbfd in 31ms, sequenceid=234, compaction requested=false 2024-12-04T06:55:17,833 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 23e1e4928f005993e4764c67507acbfd: 2024-12-04T06:55:17,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741864_1040 (size=116742) 2024-12-04T06:55:17,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741864_1040 (size=116742) 2024-12-04T06:55:17,843 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/b60e38fef86547ed8fb4e8567ceb5549 as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/b60e38fef86547ed8fb4e8567ceb5549 2024-12-04T06:55:17,850 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction 
of 3 (all) file(s) in 23e1e4928f005993e4764c67507acbfd/info of 23e1e4928f005993e4764c67507acbfd into b60e38fef86547ed8fb4e8567ceb5549(size=114.0 K), total size for store is 138.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T06:55:17,850 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 23e1e4928f005993e4764c67507acbfd: 2024-12-04T06:55:17,850 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd., storeName=23e1e4928f005993e4764c67507acbfd/info, priority=13, startTime=1733295317800; duration=0sec 2024-12-04T06:55:17,850 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T06:55:17,850 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 23e1e4928f005993e4764c67507acbfd:info 2024-12-04T06:55:18,152 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:55:18,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:19,153 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:19,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:55:19,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32897 {}] regionserver.HRegion(8855): Flush requested on 23e1e4928f005993e4764c67507acbfd 2024-12-04T06:55:19,823 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 23e1e4928f005993e4764c67507acbfd 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-12-04T06:55:19,827 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/4a22486e53fa47c792d30b76662c4b44 is 1080, key is row0184/info:/1733295317802/Put/seqid=0 2024-12-04T06:55:19,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741865_1041 (size=15750) 2024-12-04T06:55:19,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741865_1041 (size=15750) 2024-12-04T06:55:19,841 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/4a22486e53fa47c792d30b76662c4b44 2024-12-04T06:55:19,847 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/4a22486e53fa47c792d30b76662c4b44 as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/4a22486e53fa47c792d30b76662c4b44 2024-12-04T06:55:19,854 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/4a22486e53fa47c792d30b76662c4b44, entries=10, sequenceid=248, filesize=15.4 K 2024-12-04T06:55:19,855 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=16.81 KB/17216 for 23e1e4928f005993e4764c67507acbfd in 32ms, sequenceid=248, compaction requested=true 2024-12-04T06:55:19,855 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 23e1e4928f005993e4764c67507acbfd: 2024-12-04T06:55:19,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 23e1e4928f005993e4764c67507acbfd:info, priority=-2147483648, current under compaction store size is 1 2024-12-04T06:55:19,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32897 {}] regionserver.HRegion(8855): Flush requested on 23e1e4928f005993e4764c67507acbfd 2024-12-04T06:55:19,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T06:55:19,855 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T06:55:19,855 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing 23e1e4928f005993e4764c67507acbfd 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-12-04T06:55:19,857 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 157964 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T06:55:19,857 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HStore(1541): 23e1e4928f005993e4764c67507acbfd/info is initiating minor compaction (all files) 2024-12-04T06:55:19,857 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 23e1e4928f005993e4764c67507acbfd/info in TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd. 2024-12-04T06:55:19,857 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/b60e38fef86547ed8fb4e8567ceb5549, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/00c9d3168d8e4bbbadbc35ccf76433e3, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/4a22486e53fa47c792d30b76662c4b44] into tmpdir=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp, totalSize=154.3 K 2024-12-04T06:55:19,858 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.Compactor(225): Compacting b60e38fef86547ed8fb4e8567ceb5549, keycount=103, bloomtype=ROW, size=114.0 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1733295279279 2024-12-04T06:55:19,858 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.Compactor(225): Compacting 00c9d3168d8e4bbbadbc35ccf76433e3, keycount=19, bloomtype=ROW, size=24.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1733295317768 2024-12-04T06:55:19,859 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4a22486e53fa47c792d30b76662c4b44, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733295317802 2024-12-04T06:55:19,860 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/b736a1c09d884668837c3eda0398b80b is 1080, key is row0194/info:/1733295319824/Put/seqid=0 2024-12-04T06:55:19,879 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32897 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=23e1e4928f005993e4764c67507acbfd, server=607fd5c6574c,32897,1733295266166 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-04T06:55:19,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32897 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:43836 deadline: 1733295329878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=23e1e4928f005993e4764c67507acbfd, server=607fd5c6574c,32897,1733295266166 2024-12-04T06:55:19,879 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd., hostname=607fd5c6574c,32897,1733295266166, seqNum=123 , the old value is region=TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd., hostname=607fd5c6574c,32897,1733295266166, seqNum=123, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=23e1e4928f005993e4764c67507acbfd, server=607fd5c6574c,32897,1733295266166 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-04T06:55:19,880 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd., hostname=607fd5c6574c,32897,1733295266166, seqNum=123 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=23e1e4928f005993e4764c67507acbfd, server=607fd5c6574c,32897,1733295266166 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at 
org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-04T06:55:19,880 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd., hostname=607fd5c6574c,32897,1733295266166, seqNum=123 because the exception is null or not the one we care about 2024-12-04T06:55:19,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741866_1042 (size=23327) 2024-12-04T06:55:19,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741866_1042 (size=23327) 2024-12-04T06:55:19,883 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=268 (bloomFilter=true), to=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/b736a1c09d884668837c3eda0398b80b 2024-12-04T06:55:19,887 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 23e1e4928f005993e4764c67507acbfd#info#compaction#79 average throughput is 45.15 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T06:55:19,888 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/1ad063d4559f4ffdb0eb159f00164c3b is 1080, key is row0062/info:/1733295279279/Put/seqid=0 2024-12-04T06:55:19,890 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/b736a1c09d884668837c3eda0398b80b as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/b736a1c09d884668837c3eda0398b80b 2024-12-04T06:55:19,896 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/b736a1c09d884668837c3eda0398b80b, entries=17, sequenceid=268, filesize=22.8 K 2024-12-04T06:55:19,897 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=12.61 KB/12912 for 23e1e4928f005993e4764c67507acbfd in 42ms, sequenceid=268, compaction requested=false 2024-12-04T06:55:19,897 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 23e1e4928f005993e4764c67507acbfd: 2024-12-04T06:55:19,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741867_1043 (size=148311) 2024-12-04T06:55:19,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741867_1043 (size=148311) 2024-12-04T06:55:19,905 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/1ad063d4559f4ffdb0eb159f00164c3b as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/1ad063d4559f4ffdb0eb159f00164c3b 2024-12-04T06:55:19,911 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 23e1e4928f005993e4764c67507acbfd/info of 23e1e4928f005993e4764c67507acbfd into 1ad063d4559f4ffdb0eb159f00164c3b(size=144.8 K), total size for store is 167.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
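Editor's note: the records just above (06:55:19,855-19,897) show the memstore of region 23e1e4928f005993e4764c67507acbfd crossing its blocking limit ("Over memstore limit=32.0 K"), the server rejecting a Mutate with RegionTooBusyException, and a follow-up flush and compaction draining the backlog. Below is a minimal, hedged sketch of what a client writer that tolerates this back-pressure could look like. The table name, column family `info`, `rowNNNN` key format, and ~1 KB cell size are taken from the surrounding log lines; the class name, loop bounds, and backoff values are invented for illustration, and the HBase client normally retries RegionTooBusyException internally before surfacing it, so this explicit loop only mirrors the behaviour visible in the log rather than reproducing the test's actual write path.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * Illustrative sketch only: a writer that backs off when a region reports
 * "Over memstore limit" (RegionTooBusyException), as in the log above.
 * Names, counts, and backoff constants are assumptions, not the test's code.
 */
public class BackoffWriter {

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
      byte[] family = Bytes.toBytes("info");            // column family seen in the log
      for (int i = 0; i < 300; i++) {
        Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
        put.addColumn(family, Bytes.toBytes("q"), new byte[1024]); // ~1 KB value per cell
        long backoffMs = 100;
        while (true) {
          try {
            table.put(put);
            break;                                       // write accepted, next row
          } catch (IOException e) {
            // Depending on client retry settings the busy signal may arrive
            // wrapped (e.g. in a retries-exhausted exception), so walk the
            // cause chain instead of catching RegionTooBusyException directly.
            if (!causedByRegionTooBusy(e)) {
              throw e;                                   // unrelated failure: give up
            }
            Thread.sleep(backoffMs);                     // let the flush/compaction drain
            backoffMs = Math.min(backoffMs * 2, 5_000);  // capped exponential backoff
          }
        }
      }
    }
  }

  /** True if any exception in the cause chain is a RegionTooBusyException. */
  private static boolean causedByRegionTooBusy(Throwable t) {
    for (Throwable cur = t; cur != null; cur = cur.getCause()) {
      if (cur instanceof RegionTooBusyException) {
        return true;
      }
    }
    return false;
  }
}
```

The capped exponential backoff is a deliberate choice for this sketch: the log shows the region recovering within tens of milliseconds once MemStoreFlusher.0 finishes, so short initial waits avoid stalling the writer while the cap keeps it from sleeping through an entire flush-plus-compaction cycle.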
2024-12-04T06:55:19,911 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 23e1e4928f005993e4764c67507acbfd: 2024-12-04T06:55:19,911 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd., storeName=23e1e4928f005993e4764c67507acbfd/info, priority=13, startTime=1733295319855; duration=0sec 2024-12-04T06:55:19,911 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T06:55:19,911 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 23e1e4928f005993e4764c67507acbfd:info 2024-12-04T06:55:20,153 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:55:20,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:21,154 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:21,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:55:22,155 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:22,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:23,155 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:55:23,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:24,156 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:24,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:55:25,156 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:25,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:26,088 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-04T06:55:26,157 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:26,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:27,065 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 4d030d34fff502afc8453579b74c191a, had cached 0 bytes from a total of 70862 2024-12-04T06:55:27,089 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 23e1e4928f005993e4764c67507acbfd, had cached 0 bytes from a total of 171638 2024-12-04T06:55:27,093 DEBUG [master/607fd5c6574c:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-04T06:55:27,157 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:27,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:28,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:28,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:29,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:29,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:55:29,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32897 {}] regionserver.HRegion(8855): Flush requested on 23e1e4928f005993e4764c67507acbfd 2024-12-04T06:55:29,920 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 23e1e4928f005993e4764c67507acbfd 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-12-04T06:55:29,924 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/0ed7daf7582c44639545dd1289f957c3 is 1080, key is row0211/info:/1733295319856/Put/seqid=0 2024-12-04T06:55:29,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741868_1044 (size=19013) 2024-12-04T06:55:29,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741868_1044 (size=19013) 2024-12-04T06:55:29,933 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/0ed7daf7582c44639545dd1289f957c3 2024-12-04T06:55:29,940 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/0ed7daf7582c44639545dd1289f957c3 as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/0ed7daf7582c44639545dd1289f957c3 2024-12-04T06:55:29,945 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/0ed7daf7582c44639545dd1289f957c3, entries=13, sequenceid=285, filesize=18.6 K 2024-12-04T06:55:29,947 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=1.05 KB/1076 for 23e1e4928f005993e4764c67507acbfd in 27ms, sequenceid=285, compaction requested=true 2024-12-04T06:55:29,947 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 23e1e4928f005993e4764c67507acbfd: 2024-12-04T06:55:29,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 23e1e4928f005993e4764c67507acbfd:info, priority=-2147483648, current under compaction store size is 1 2024-12-04T06:55:29,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T06:55:29,947 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T06:55:29,948 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 190651 starting at candidate #0 after 
considering 1 permutations with 1 in ratio 2024-12-04T06:55:29,948 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HStore(1541): 23e1e4928f005993e4764c67507acbfd/info is initiating minor compaction (all files) 2024-12-04T06:55:29,948 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 23e1e4928f005993e4764c67507acbfd/info in TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd. 2024-12-04T06:55:29,948 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/1ad063d4559f4ffdb0eb159f00164c3b, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/b736a1c09d884668837c3eda0398b80b, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/0ed7daf7582c44639545dd1289f957c3] into tmpdir=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp, totalSize=186.2 K 2024-12-04T06:55:29,949 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1ad063d4559f4ffdb0eb159f00164c3b, keycount=132, bloomtype=ROW, size=144.8 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733295279279 2024-12-04T06:55:29,949 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.Compactor(225): Compacting b736a1c09d884668837c3eda0398b80b, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=268, earliestPutTs=1733295319824 2024-12-04T06:55:29,949 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0ed7daf7582c44639545dd1289f957c3, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1733295319856 2024-12-04T06:55:29,963 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 23e1e4928f005993e4764c67507acbfd#info#compaction#81 average throughput is 55.41 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T06:55:29,964 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/d1b66bb04f2e4c4e97e9f59d73a49301 is 1080, key is row0062/info:/1733295279279/Put/seqid=0 2024-12-04T06:55:29,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741869_1045 (size=180785) 2024-12-04T06:55:29,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741869_1045 (size=180785) 2024-12-04T06:55:29,979 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/d1b66bb04f2e4c4e97e9f59d73a49301 as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/d1b66bb04f2e4c4e97e9f59d73a49301 2024-12-04T06:55:29,984 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 23e1e4928f005993e4764c67507acbfd/info of 23e1e4928f005993e4764c67507acbfd into d1b66bb04f2e4c4e97e9f59d73a49301(size=176.5 K), total size for store is 176.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T06:55:29,984 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 23e1e4928f005993e4764c67507acbfd: 2024-12-04T06:55:29,984 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd., storeName=23e1e4928f005993e4764c67507acbfd/info, priority=13, startTime=1733295329947; duration=0sec 2024-12-04T06:55:29,984 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T06:55:29,984 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 23e1e4928f005993e4764c67507acbfd:info 2024-12-04T06:55:30,159 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:30,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:31,160 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:31,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:31,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32897 {}] regionserver.HRegion(8855): Flush requested on 23e1e4928f005993e4764c67507acbfd 2024-12-04T06:55:31,934 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 23e1e4928f005993e4764c67507acbfd 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-04T06:55:31,939 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/5148dd6375cd4ee3b76d0cf5d74e7cf7 is 1080, key is row0224/info:/1733295329921/Put/seqid=0 2024-12-04T06:55:31,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741870_1046 (size=12523) 2024-12-04T06:55:31,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741870_1046 (size=12523) 2024-12-04T06:55:31,946 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/5148dd6375cd4ee3b76d0cf5d74e7cf7 2024-12-04T06:55:31,952 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/5148dd6375cd4ee3b76d0cf5d74e7cf7 as 
hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/5148dd6375cd4ee3b76d0cf5d74e7cf7 2024-12-04T06:55:31,974 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/5148dd6375cd4ee3b76d0cf5d74e7cf7, entries=7, sequenceid=296, filesize=12.2 K 2024-12-04T06:55:31,975 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=21.02 KB/21520 for 23e1e4928f005993e4764c67507acbfd in 42ms, sequenceid=296, compaction requested=false 2024-12-04T06:55:31,975 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 23e1e4928f005993e4764c67507acbfd: 2024-12-04T06:55:31,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32897 {}] regionserver.HRegion(8855): Flush requested on 23e1e4928f005993e4764c67507acbfd 2024-12-04T06:55:31,975 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 23e1e4928f005993e4764c67507acbfd 1/1 column families, dataSize=22.07 KB heapSize=23.88 KB 2024-12-04T06:55:31,980 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/28a0b8235de0416485094cd965f40afd is 1080, key is row0231/info:/1733295331936/Put/seqid=0 2024-12-04T06:55:31,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741871_1047 (size=27649) 2024-12-04T06:55:31,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741871_1047 (size=27649) 2024-12-04T06:55:31,987 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.07 KB at sequenceid=320 (bloomFilter=true), to=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/28a0b8235de0416485094cd965f40afd 2024-12-04T06:55:31,993 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/28a0b8235de0416485094cd965f40afd as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/28a0b8235de0416485094cd965f40afd 2024-12-04T06:55:31,998 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/28a0b8235de0416485094cd965f40afd, entries=21, sequenceid=320, filesize=27.0 K 2024-12-04T06:55:31,999 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~22.07 KB/22596, heapSize ~23.86 KB/24432, currentSize=5.25 KB/5380 for 23e1e4928f005993e4764c67507acbfd in 24ms, sequenceid=320, compaction requested=true 2024-12-04T06:55:31,999 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 23e1e4928f005993e4764c67507acbfd: 2024-12-04T06:55:31,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 23e1e4928f005993e4764c67507acbfd:info, priority=-2147483648, current under compaction store size is 1 2024-12-04T06:55:31,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T06:55:31,999 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T06:55:32,001 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 220957 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T06:55:32,001 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HStore(1541): 23e1e4928f005993e4764c67507acbfd/info is initiating minor compaction (all files) 2024-12-04T06:55:32,001 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 23e1e4928f005993e4764c67507acbfd/info in TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd. 2024-12-04T06:55:32,001 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/d1b66bb04f2e4c4e97e9f59d73a49301, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/5148dd6375cd4ee3b76d0cf5d74e7cf7, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/28a0b8235de0416485094cd965f40afd] into tmpdir=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp, totalSize=215.8 K 2024-12-04T06:55:32,001 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.Compactor(225): Compacting d1b66bb04f2e4c4e97e9f59d73a49301, keycount=162, bloomtype=ROW, size=176.5 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1733295279279 2024-12-04T06:55:32,002 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5148dd6375cd4ee3b76d0cf5d74e7cf7, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1733295329921 2024-12-04T06:55:32,002 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] compactions.Compactor(225): Compacting 28a0b8235de0416485094cd965f40afd, keycount=21, bloomtype=ROW, size=27.0 K, encoding=NONE, compression=NONE, seqNum=320, earliestPutTs=1733295331936 2024-12-04T06:55:32,015 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 23e1e4928f005993e4764c67507acbfd#info#compaction#84 average throughput is 48.74 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T06:55:32,016 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/39450813f706431a9cdcc8fca83cd54c is 1080, key is row0062/info:/1733295279279/Put/seqid=0 2024-12-04T06:55:32,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741872_1048 (size=211176) 2024-12-04T06:55:32,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741872_1048 (size=211176) 2024-12-04T06:55:32,025 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/39450813f706431a9cdcc8fca83cd54c as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/39450813f706431a9cdcc8fca83cd54c 2024-12-04T06:55:32,031 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 23e1e4928f005993e4764c67507acbfd/info of 23e1e4928f005993e4764c67507acbfd into 39450813f706431a9cdcc8fca83cd54c(size=206.2 K), total size for store is 206.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T06:55:32,031 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 23e1e4928f005993e4764c67507acbfd: 2024-12-04T06:55:32,032 INFO [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd., storeName=23e1e4928f005993e4764c67507acbfd/info, priority=13, startTime=1733295331999; duration=0sec 2024-12-04T06:55:32,032 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T06:55:32,032 DEBUG [RS:0;607fd5c6574c:32897-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 23e1e4928f005993e4764c67507acbfd:info 2024-12-04T06:55:32,160 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:32,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:33,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:55:33,170 DEBUG [master/607fd5c6574c:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=3, created chunk count=9, reused chunk count=69, reuseRatio=88.46% 2024-12-04T06:55:33,170 DEBUG [master/607fd5c6574c:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-04T06:55:33,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:55:33,984 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-12-04T06:55:33,985 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C32897%2C1733295266166.1733295333984 2024-12-04T06:55:34,000 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:34,000 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:34,000 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:34,000 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:34,000 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:34,001 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/WALs/607fd5c6574c,32897,1733295266166/607fd5c6574c%2C32897%2C1733295266166.1733295266561 with entries=308, filesize=307.10 KB; new WAL /user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/WALs/607fd5c6574c,32897,1733295266166/607fd5c6574c%2C32897%2C1733295266166.1733295333984 2024-12-04T06:55:34,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741833_1009 (size=314476) 2024-12-04T06:55:34,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741833_1009 (size=314476) 2024-12-04T06:55:34,003 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46187:46187),(127.0.0.1/127.0.0.1:37635:37635)] 2024-12-04T06:55:34,006 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 23e1e4928f005993e4764c67507acbfd 1/1 column families, dataSize=5.25 KB heapSize=5.88 KB 2024-12-04T06:55:34,010 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/2ccf3a9de0fa482b82c353b464bc8f9d is 1080, key is row0252/info:/1733295331976/Put/seqid=0 2024-12-04T06:55:34,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741874_1050 (size=10357) 2024-12-04T06:55:34,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741874_1050 (size=10357) 2024-12-04T06:55:34,017 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.25 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/2ccf3a9de0fa482b82c353b464bc8f9d 2024-12-04T06:55:34,022 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/.tmp/info/2ccf3a9de0fa482b82c353b464bc8f9d as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/2ccf3a9de0fa482b82c353b464bc8f9d 2024-12-04T06:55:34,027 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/2ccf3a9de0fa482b82c353b464bc8f9d, entries=5, sequenceid=329, filesize=10.1 K 2024-12-04T06:55:34,028 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.25 KB/5380, heapSize ~5.86 KB/6000, currentSize=0 B/0 for 23e1e4928f005993e4764c67507acbfd in 22ms, sequenceid=329, compaction requested=false 2024-12-04T06:55:34,028 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 23e1e4928f005993e4764c67507acbfd: 2024-12-04T06:55:34,028 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-12-04T06:55:34,032 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/hbase/meta/1588230740/.tmp/info/df28cc30697347bd81e6a5299749ed3e is 193, key is TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd./info:regioninfo/1733295282134/Put/seqid=0 2024-12-04T06:55:34,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741875_1051 (size=6223) 2024-12-04T06:55:34,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741875_1051 (size=6223) 2024-12-04T06:55:34,038 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/hbase/meta/1588230740/.tmp/info/df28cc30697347bd81e6a5299749ed3e 2024-12-04T06:55:34,043 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/hbase/meta/1588230740/.tmp/info/df28cc30697347bd81e6a5299749ed3e as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/hbase/meta/1588230740/info/df28cc30697347bd81e6a5299749ed3e 2024-12-04T06:55:34,048 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/hbase/meta/1588230740/info/df28cc30697347bd81e6a5299749ed3e, entries=5, sequenceid=21, filesize=6.1 K 2024-12-04T06:55:34,049 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 21ms, sequenceid=21, compaction requested=false 2024-12-04T06:55:34,049 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-04T06:55:34,049 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 4d030d34fff502afc8453579b74c191a: 2024-12-04T06:55:34,049 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C32897%2C1733295266166.1733295334049 2024-12-04T06:55:34,054 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:34,054 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:34,054 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:34,054 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:34,054 INFO 
[sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:34,054 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/WALs/607fd5c6574c,32897,1733295266166/607fd5c6574c%2C32897%2C1733295266166.1733295333984 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/WALs/607fd5c6574c,32897,1733295266166/607fd5c6574c%2C32897%2C1733295266166.1733295334049 2024-12-04T06:55:34,055 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46187:46187),(127.0.0.1/127.0.0.1:37635:37635)] 2024-12-04T06:55:34,055 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/WALs/607fd5c6574c,32897,1733295266166/607fd5c6574c%2C32897%2C1733295266166.1733295333984 is not closed yet, will try archiving it next time 2024-12-04T06:55:34,055 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/WALs/607fd5c6574c,32897,1733295266166/607fd5c6574c%2C32897%2C1733295266166.1733295266561 to hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/oldWALs/607fd5c6574c%2C32897%2C1733295266166.1733295266561 2024-12-04T06:55:34,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741873_1049 (size=731) 2024-12-04T06:55:34,056 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-12-04T06:55:34,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741873_1049 (size=731) 2024-12-04T06:55:34,057 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/WALs/607fd5c6574c,32897,1733295266166/607fd5c6574c%2C32897%2C1733295266166.1733295333984 to hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/oldWALs/607fd5c6574c%2C32897%2C1733295266166.1733295333984 2024-12-04T06:55:34,156 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-04T06:55:34,156 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-04T06:55:34,156 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T06:55:34,156 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:55:34,157 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:55:34,157 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-04T06:55:34,157 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-04T06:55:34,157 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=898434329, stopped=false 2024-12-04T06:55:34,157 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=607fd5c6574c,36799,1733295266107 2024-12-04T06:55:34,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32897-0x1017c40c3ba0001, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T06:55:34,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36799-0x1017c40c3ba0000, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T06:55:34,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32897-0x1017c40c3ba0001, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:55:34,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36799-0x1017c40c3ba0000, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:55:34,159 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-04T06:55:34,159 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-04T06:55:34,159 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T06:55:34,159 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:55:34,160 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '607fd5c6574c,32897,1733295266166' ***** 2024-12-04T06:55:34,160 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:32897-0x1017c40c3ba0001, quorum=127.0.0.1:60571, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T06:55:34,160 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-04T06:55:34,160 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36799-0x1017c40c3ba0000, quorum=127.0.0.1:60571, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T06:55:34,160 INFO [RS:0;607fd5c6574c:32897 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-04T06:55:34,160 INFO [RS:0;607fd5c6574c:32897 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-04T06:55:34,160 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-04T06:55:34,160 INFO [RS:0;607fd5c6574c:32897 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
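Aside (not part of the captured log): the call stacks above all bottom out in AbstractTestLogRolling.tearDown(), which shuts the mini cluster down via HBaseTestingUtil.shutdownMiniCluster(); that call is what triggers the connection-close, master-shutdown, and "STOPPING region server" entries that follow. A minimal sketch of that teardown pattern under plain JUnit 4 is shown below; the class and field names here are invented for the example, only the HBaseTestingUtil methods come from the log.

// Sketch: mini-cluster lifecycle as implied by the stack trace above.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class LogRollingTeardownSketch {
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Starts a single-process HBase cluster backed by a mini DFS/ZooKeeper.
    TEST_UTIL.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    // Closes the shared connection, stops master and region servers, and
    // tears down the mini DFS/ZooKeeper, producing shutdown output like the above.
    TEST_UTIL.shutdownMiniCluster();
  }
}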
2024-12-04T06:55:34,161 INFO [RS:0;607fd5c6574c:32897 {}] regionserver.HRegionServer(3091): Received CLOSE for 23e1e4928f005993e4764c67507acbfd 2024-12-04T06:55:34,161 INFO [RS:0;607fd5c6574c:32897 {}] regionserver.HRegionServer(3091): Received CLOSE for 4d030d34fff502afc8453579b74c191a 2024-12-04T06:55:34,161 INFO [RS:0;607fd5c6574c:32897 {}] regionserver.HRegionServer(959): stopping server 607fd5c6574c,32897,1733295266166 2024-12-04T06:55:34,161 INFO [RS:0;607fd5c6574c:32897 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T06:55:34,161 INFO [RS:0;607fd5c6574c:32897 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;607fd5c6574c:32897. 2024-12-04T06:55:34,161 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 23e1e4928f005993e4764c67507acbfd, disabling compactions & flushes 2024-12-04T06:55:34,161 DEBUG [RS:0;607fd5c6574c:32897 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T06:55:34,161 INFO [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd. 2024-12-04T06:55:34,161 DEBUG [RS:0;607fd5c6574c:32897 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:55:34,161 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd. 2024-12-04T06:55:34,161 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd. after waiting 0 ms 2024-12-04T06:55:34,161 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd. 
2024-12-04T06:55:34,161 INFO [RS:0;607fd5c6574c:32897 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-04T06:55:34,161 INFO [RS:0;607fd5c6574c:32897 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-04T06:55:34,161 INFO [RS:0;607fd5c6574c:32897 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-04T06:55:34,161 INFO [RS:0;607fd5c6574c:32897 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-04T06:55:34,161 INFO [RS:0;607fd5c6574c:32897 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-12-04T06:55:34,161 DEBUG [RS:0;607fd5c6574c:32897 {}] regionserver.HRegionServer(1325): Online Regions={23e1e4928f005993e4764c67507acbfd=TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd., 1588230740=hbase:meta,,1.1588230740, 4d030d34fff502afc8453579b74c191a=TestLogRolling-testLogRolling,,1733295281396.4d030d34fff502afc8453579b74c191a.} 2024-12-04T06:55:34,161 DEBUG [RS:0;607fd5c6574c:32897 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 23e1e4928f005993e4764c67507acbfd, 4d030d34fff502afc8453579b74c191a 2024-12-04T06:55:34,162 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-04T06:55:34,162 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-04T06:55:34,162 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-04T06:55:34,162 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-04T06:55:34,162 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-04T06:55:34,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:34,162 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/333f35176f324900a887efdce337d1f5.90e74c91b24a6a418dfd5e19734cb3a0->hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/333f35176f324900a887efdce337d1f5-top, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/TestLogRolling-testLogRolling=90e74c91b24a6a418dfd5e19734cb3a0-bfee17580ca44fd38a26bb4f79c8183a, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/29fdd3ce3ded42088f02583157bd7844, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/TestLogRolling-testLogRolling=90e74c91b24a6a418dfd5e19734cb3a0-e9edfd86d2eb4196a5625ee5bdbcfc2e, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/d9a01860595f4a8295b4886f203ef51b, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/d17978525cde46dbb308aa45fb6a8e8b, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/0dc425b96abd42c2beb8e21e084528b0, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/a7581316a2b747c38fd500061328ed79, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/7c7b1effe410455d881271c02dd02626, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/a9182a56cad742b2adaf2e8d7729bb39, 
hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/cfb69f173cf249e6b5c622b3e888c437, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/b60e38fef86547ed8fb4e8567ceb5549, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/d077c5ee51e0401b8045113a17d369a1, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/00c9d3168d8e4bbbadbc35ccf76433e3, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/1ad063d4559f4ffdb0eb159f00164c3b, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/4a22486e53fa47c792d30b76662c4b44, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/b736a1c09d884668837c3eda0398b80b, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/d1b66bb04f2e4c4e97e9f59d73a49301, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/0ed7daf7582c44639545dd1289f957c3, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/5148dd6375cd4ee3b76d0cf5d74e7cf7, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/28a0b8235de0416485094cd965f40afd] to archive 2024-12-04T06:55:34,163 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
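Aside (not part of the captured log): each "Archived from FileableStoreFile ... to ..." pair that follows keeps the table/region/family layout intact and only swaps the active data/ prefix for archive/ under the same cluster root. A hypothetical helper illustrating that path rewrite is sketched below; toArchivePath is invented for this example and is not the HBase API, and the sample paths are copied from the log (scheme and authority omitted).

// Sketch: map a store file under data/ to its archive/ counterpart.
import org.apache.hadoop.fs.Path;

public class ArchivePathSketch {
  static Path toArchivePath(Path rootDir, Path storeFile) {
    // Relative part: data/<namespace>/<table>/<region>/<family>/<hfile>
    String relative = storeFile.toUri().getPath()
        .substring(rootDir.toUri().getPath().length() + 1);
    return new Path(new Path(rootDir, "archive"), relative);
  }

  public static void main(String[] args) {
    Path root = new Path("/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0");
    Path hfile = new Path(root,
        "data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd"
        + "/info/29fdd3ce3ded42088f02583157bd7844");
    // Prints .../archive/data/default/TestLogRolling-testLogRolling/.../info/29fd...
    System.out.println(toArchivePath(root, hfile));
  }
}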
2024-12-04T06:55:34,165 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/333f35176f324900a887efdce337d1f5.90e74c91b24a6a418dfd5e19734cb3a0 to hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/archive/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/333f35176f324900a887efdce337d1f5.90e74c91b24a6a418dfd5e19734cb3a0 2024-12-04T06:55:34,166 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/TestLogRolling-testLogRolling=90e74c91b24a6a418dfd5e19734cb3a0-bfee17580ca44fd38a26bb4f79c8183a to hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/archive/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/TestLogRolling-testLogRolling=90e74c91b24a6a418dfd5e19734cb3a0-bfee17580ca44fd38a26bb4f79c8183a 2024-12-04T06:55:34,166 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-12-04T06:55:34,167 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-04T06:55:34,167 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-04T06:55:34,167 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733295334161Running coprocessor pre-close hooks at 1733295334161Disabling compacts and flushes for region at 1733295334161Disabling writes for close at 1733295334162 (+1 ms)Writing region close event to WAL at 1733295334163 (+1 ms)Running coprocessor post-close hooks at 1733295334167 (+4 ms)Closed at 1733295334167 2024-12-04T06:55:34,167 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-04T06:55:34,167 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/29fdd3ce3ded42088f02583157bd7844 to hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/archive/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/29fdd3ce3ded42088f02583157bd7844 2024-12-04T06:55:34,168 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/TestLogRolling-testLogRolling=90e74c91b24a6a418dfd5e19734cb3a0-e9edfd86d2eb4196a5625ee5bdbcfc2e to hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/archive/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/TestLogRolling-testLogRolling=90e74c91b24a6a418dfd5e19734cb3a0-e9edfd86d2eb4196a5625ee5bdbcfc2e 2024-12-04T06:55:34,169 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/d9a01860595f4a8295b4886f203ef51b to hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/archive/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/d9a01860595f4a8295b4886f203ef51b 2024-12-04T06:55:34,170 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/d17978525cde46dbb308aa45fb6a8e8b to hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/archive/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/d17978525cde46dbb308aa45fb6a8e8b 2024-12-04T06:55:34,171 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/0dc425b96abd42c2beb8e21e084528b0 to hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/archive/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/0dc425b96abd42c2beb8e21e084528b0 2024-12-04T06:55:34,172 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/a7581316a2b747c38fd500061328ed79 to hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/archive/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/a7581316a2b747c38fd500061328ed79 2024-12-04T06:55:34,173 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/7c7b1effe410455d881271c02dd02626 to hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/archive/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/7c7b1effe410455d881271c02dd02626 2024-12-04T06:55:34,174 
DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/a9182a56cad742b2adaf2e8d7729bb39 to hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/archive/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/a9182a56cad742b2adaf2e8d7729bb39 2024-12-04T06:55:34,175 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/cfb69f173cf249e6b5c622b3e888c437 to hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/archive/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/cfb69f173cf249e6b5c622b3e888c437 2024-12-04T06:55:34,176 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/b60e38fef86547ed8fb4e8567ceb5549 to hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/archive/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/b60e38fef86547ed8fb4e8567ceb5549 2024-12-04T06:55:34,177 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/d077c5ee51e0401b8045113a17d369a1 to hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/archive/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/d077c5ee51e0401b8045113a17d369a1 2024-12-04T06:55:34,178 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/00c9d3168d8e4bbbadbc35ccf76433e3 to hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/archive/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/00c9d3168d8e4bbbadbc35ccf76433e3 2024-12-04T06:55:34,179 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/1ad063d4559f4ffdb0eb159f00164c3b to 
hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/archive/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/1ad063d4559f4ffdb0eb159f00164c3b 2024-12-04T06:55:34,180 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/4a22486e53fa47c792d30b76662c4b44 to hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/archive/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/4a22486e53fa47c792d30b76662c4b44 2024-12-04T06:55:34,181 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/b736a1c09d884668837c3eda0398b80b to hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/archive/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/b736a1c09d884668837c3eda0398b80b 2024-12-04T06:55:34,182 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/d1b66bb04f2e4c4e97e9f59d73a49301 to hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/archive/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/d1b66bb04f2e4c4e97e9f59d73a49301 2024-12-04T06:55:34,183 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/0ed7daf7582c44639545dd1289f957c3 to hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/archive/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/0ed7daf7582c44639545dd1289f957c3 2024-12-04T06:55:34,184 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/5148dd6375cd4ee3b76d0cf5d74e7cf7 to hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/archive/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/5148dd6375cd4ee3b76d0cf5d74e7cf7 2024-12-04T06:55:34,185 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/28a0b8235de0416485094cd965f40afd to hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/archive/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/info/28a0b8235de0416485094cd965f40afd 2024-12-04T06:55:34,186 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=607fd5c6574c:36799 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-12-04T06:55:34,186 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [29fdd3ce3ded42088f02583157bd7844=41907, d9a01860595f4a8295b4886f203ef51b=12514, d17978525cde46dbb308aa45fb6a8e8b=69025, 0dc425b96abd42c2beb8e21e084528b0=24394, a7581316a2b747c38fd500061328ed79=14672, 7c7b1effe410455d881271c02dd02626=93998, a9182a56cad742b2adaf2e8d7729bb39=20078, cfb69f173cf249e6b5c622b3e888c437=20078, b60e38fef86547ed8fb4e8567ceb5549=116742, d077c5ee51e0401b8045113a17d369a1=12516, 00c9d3168d8e4bbbadbc35ccf76433e3=25472, 1ad063d4559f4ffdb0eb159f00164c3b=148311, 4a22486e53fa47c792d30b76662c4b44=15750, b736a1c09d884668837c3eda0398b80b=23327, d1b66bb04f2e4c4e97e9f59d73a49301=180785, 0ed7daf7582c44639545dd1289f957c3=19013, 5148dd6375cd4ee3b76d0cf5d74e7cf7=12523, 28a0b8235de0416485094cd965f40afd=27649] 2024-12-04T06:55:34,189 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/23e1e4928f005993e4764c67507acbfd/recovered.edits/332.seqid, newMaxSeqId=332, maxSeqId=122 2024-12-04T06:55:34,190 INFO [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd. 2024-12-04T06:55:34,190 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 23e1e4928f005993e4764c67507acbfd: Waiting for close lock at 1733295334161Running coprocessor pre-close hooks at 1733295334161Disabling compacts and flushes for region at 1733295334161Disabling writes for close at 1733295334161Writing region close event to WAL at 1733295334186 (+25 ms)Running coprocessor post-close hooks at 1733295334190 (+4 ms)Closed at 1733295334190 2024-12-04T06:55:34,190 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1733295281396.23e1e4928f005993e4764c67507acbfd. 
2024-12-04T06:55:34,190 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 4d030d34fff502afc8453579b74c191a, disabling compactions & flushes 2024-12-04T06:55:34,190 INFO [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733295281396.4d030d34fff502afc8453579b74c191a. 2024-12-04T06:55:34,190 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733295281396.4d030d34fff502afc8453579b74c191a. 2024-12-04T06:55:34,190 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733295281396.4d030d34fff502afc8453579b74c191a. after waiting 0 ms 2024-12-04T06:55:34,190 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733295281396.4d030d34fff502afc8453579b74c191a. 2024-12-04T06:55:34,190 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733295281396.4d030d34fff502afc8453579b74c191a.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/4d030d34fff502afc8453579b74c191a/info/333f35176f324900a887efdce337d1f5.90e74c91b24a6a418dfd5e19734cb3a0->hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/90e74c91b24a6a418dfd5e19734cb3a0/info/333f35176f324900a887efdce337d1f5-bottom] to archive 2024-12-04T06:55:34,191 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733295281396.4d030d34fff502afc8453579b74c191a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-04T06:55:34,192 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733295281396.4d030d34fff502afc8453579b74c191a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/4d030d34fff502afc8453579b74c191a/info/333f35176f324900a887efdce337d1f5.90e74c91b24a6a418dfd5e19734cb3a0 to hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/archive/data/default/TestLogRolling-testLogRolling/4d030d34fff502afc8453579b74c191a/info/333f35176f324900a887efdce337d1f5.90e74c91b24a6a418dfd5e19734cb3a0 2024-12-04T06:55:34,193 WARN [StoreCloser-TestLogRolling-testLogRolling,,1733295281396.4d030d34fff502afc8453579b74c191a.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-12-04T06:55:34,196 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/data/default/TestLogRolling-testLogRolling/4d030d34fff502afc8453579b74c191a/recovered.edits/127.seqid, newMaxSeqId=127, maxSeqId=122 2024-12-04T06:55:34,196 INFO [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733295281396.4d030d34fff502afc8453579b74c191a. 
2024-12-04T06:55:34,197 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 4d030d34fff502afc8453579b74c191a: Waiting for close lock at 1733295334190Running coprocessor pre-close hooks at 1733295334190Disabling compacts and flushes for region at 1733295334190Disabling writes for close at 1733295334190Writing region close event to WAL at 1733295334193 (+3 ms)Running coprocessor post-close hooks at 1733295334196 (+3 ms)Closed at 1733295334196 2024-12-04T06:55:34,197 DEBUG [RS_CLOSE_REGION-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1733295281396.4d030d34fff502afc8453579b74c191a. 2024-12-04T06:55:34,362 INFO [RS:0;607fd5c6574c:32897 {}] regionserver.HRegionServer(976): stopping server 607fd5c6574c,32897,1733295266166; all regions closed. 2024-12-04T06:55:34,362 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:34,362 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:34,362 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:34,363 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:34,363 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:34,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741834_1010 (size=8107) 2024-12-04T06:55:34,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741834_1010 (size=8107) 2024-12-04T06:55:34,367 DEBUG [RS:0;607fd5c6574c:32897 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/oldWALs 2024-12-04T06:55:34,367 INFO [RS:0;607fd5c6574c:32897 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 607fd5c6574c%2C32897%2C1733295266166.meta:.meta(num 1733295266973) 2024-12-04T06:55:34,368 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:34,368 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:34,368 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:34,368 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:34,369 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:34,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741876_1052 (size=778) 2024-12-04T06:55:34,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741876_1052 (size=778) 2024-12-04T06:55:34,375 DEBUG [RS:0;607fd5c6574c:32897 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/oldWALs 2024-12-04T06:55:34,375 INFO [RS:0;607fd5c6574c:32897 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 607fd5c6574c%2C32897%2C1733295266166:(num 1733295334049) 2024-12-04T06:55:34,375 DEBUG [RS:0;607fd5c6574c:32897 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:55:34,375 INFO [RS:0;607fd5c6574c:32897 {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T06:55:34,375 INFO [RS:0;607fd5c6574c:32897 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T06:55:34,375 INFO [RS:0;607fd5c6574c:32897 {}] hbase.ChoreService(370): Chore service for: 
regionserver/607fd5c6574c:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-04T06:55:34,375 INFO [RS:0;607fd5c6574c:32897 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T06:55:34,375 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-04T06:55:34,376 INFO [RS:0;607fd5c6574c:32897 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:32897 2024-12-04T06:55:34,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36799-0x1017c40c3ba0000, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T06:55:34,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32897-0x1017c40c3ba0001, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/607fd5c6574c,32897,1733295266166 2024-12-04T06:55:34,379 INFO [RS:0;607fd5c6574c:32897 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T06:55:34,380 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [607fd5c6574c,32897,1733295266166] 2024-12-04T06:55:34,381 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/607fd5c6574c,32897,1733295266166 already deleted, retry=false 2024-12-04T06:55:34,381 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 607fd5c6574c,32897,1733295266166 expired; onlineServers=0 2024-12-04T06:55:34,381 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '607fd5c6574c,36799,1733295266107' ***** 2024-12-04T06:55:34,381 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-04T06:55:34,382 INFO [M:0;607fd5c6574c:36799 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T06:55:34,382 INFO [M:0;607fd5c6574c:36799 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T06:55:34,382 DEBUG [M:0;607fd5c6574c:36799 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-04T06:55:34,382 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
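Aside (not part of the captured log): the NodeDeleted event for /hbase/rs/607fd5c6574c,32897,1733295266166 above is the stopping region server's ephemeral liveness znode going away, which is what RegionServerTracker reacts to before the master begins its own shutdown. A small sketch of inspecting those znodes with the plain ZooKeeper client is shown below; the quorum string and znode path are copied from the log, everything else is an assumption for the example.

// Sketch: list the ephemeral region-server znodes under /hbase/rs.
import java.util.List;
import org.apache.zookeeper.ZooKeeper;

public class ListRegionServerZNodes {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:60571", 30_000,
        event -> { /* ignore watch events in this sketch */ });
    try {
      List<String> servers = zk.getChildren("/hbase/rs", false);
      // Each child is host,port,startcode, e.g. 607fd5c6574c,32897,1733295266166
      servers.forEach(System.out::println);
    } finally {
      zk.close();
    }
  }
}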
2024-12-04T06:55:34,382 DEBUG [M:0;607fd5c6574c:36799 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-04T06:55:34,382 DEBUG [master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.small.0-1733295266359 {}] cleaner.HFileCleaner(306): Exit Thread[master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.small.0-1733295266359,5,FailOnTimeoutGroup] 2024-12-04T06:55:34,382 DEBUG [master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.large.0-1733295266359 {}] cleaner.HFileCleaner(306): Exit Thread[master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.large.0-1733295266359,5,FailOnTimeoutGroup] 2024-12-04T06:55:34,382 INFO [M:0;607fd5c6574c:36799 {}] hbase.ChoreService(370): Chore service for: master/607fd5c6574c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-04T06:55:34,382 INFO [M:0;607fd5c6574c:36799 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T06:55:34,382 DEBUG [M:0;607fd5c6574c:36799 {}] master.HMaster(1795): Stopping service threads 2024-12-04T06:55:34,382 INFO [M:0;607fd5c6574c:36799 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-04T06:55:34,382 INFO [M:0;607fd5c6574c:36799 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-04T06:55:34,382 INFO [M:0;607fd5c6574c:36799 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-04T06:55:34,383 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-04T06:55:34,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36799-0x1017c40c3ba0000, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-04T06:55:34,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36799-0x1017c40c3ba0000, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:55:34,384 DEBUG [M:0;607fd5c6574c:36799 {}] zookeeper.ZKUtil(347): master:36799-0x1017c40c3ba0000, quorum=127.0.0.1:60571, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-04T06:55:34,384 WARN [M:0;607fd5c6574c:36799 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-04T06:55:34,384 INFO [M:0;607fd5c6574c:36799 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/.lastflushedseqids 2024-12-04T06:55:34,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741877_1053 (size=228) 2024-12-04T06:55:34,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741877_1053 (size=228) 2024-12-04T06:55:34,390 INFO [M:0;607fd5c6574c:36799 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-04T06:55:34,390 INFO [M:0;607fd5c6574c:36799 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-04T06:55:34,390 DEBUG [M:0;607fd5c6574c:36799 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-04T06:55:34,390 INFO [M:0;607fd5c6574c:36799 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:55:34,390 DEBUG [M:0;607fd5c6574c:36799 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:55:34,390 DEBUG [M:0;607fd5c6574c:36799 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-04T06:55:34,390 DEBUG [M:0;607fd5c6574c:36799 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:55:34,390 INFO [M:0;607fd5c6574c:36799 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.41 KB heapSize=63.33 KB 2024-12-04T06:55:34,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:55:34,408 DEBUG [M:0;607fd5c6574c:36799 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7c07415911d3421ab8c538412bdab2c1 is 82, key is hbase:meta,,1/info:regioninfo/1733295267000/Put/seqid=0 2024-12-04T06:55:34,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741878_1054 (size=5672) 2024-12-04T06:55:34,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741878_1054 (size=5672) 2024-12-04T06:55:34,413 INFO [M:0;607fd5c6574c:36799 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7c07415911d3421ab8c538412bdab2c1 2024-12-04T06:55:34,426 INFO [regionserver/607fd5c6574c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T06:55:34,433 DEBUG [M:0;607fd5c6574c:36799 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/be32efebccce4ba48dd28078ea4a6d35 is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733295267477/Put/seqid=0 2024-12-04T06:55:34,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741879_1055 (size=7089) 2024-12-04T06:55:34,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741879_1055 (size=7089) 2024-12-04T06:55:34,442 INFO [M:0;607fd5c6574c:36799 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.80 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/be32efebccce4ba48dd28078ea4a6d35 2024-12-04T06:55:34,447 INFO [M:0;607fd5c6574c:36799 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for be32efebccce4ba48dd28078ea4a6d35 2024-12-04T06:55:34,461 DEBUG [M:0;607fd5c6574c:36799 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/34fe1100bc4042a7b5d16d625d05e8ba is 69, key is 607fd5c6574c,32897,1733295266166/rs:state/1733295266408/Put/seqid=0 2024-12-04T06:55:34,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741880_1056 (size=5156) 2024-12-04T06:55:34,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741880_1056 (size=5156) 2024-12-04T06:55:34,466 INFO [M:0;607fd5c6574c:36799 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), 
to=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/34fe1100bc4042a7b5d16d625d05e8ba 2024-12-04T06:55:34,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32897-0x1017c40c3ba0001, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T06:55:34,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32897-0x1017c40c3ba0001, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T06:55:34,481 INFO [RS:0;607fd5c6574c:32897 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-04T06:55:34,481 INFO [RS:0;607fd5c6574c:32897 {}] regionserver.HRegionServer(1031): Exiting; stopping=607fd5c6574c,32897,1733295266166; zookeeper connection closed. 2024-12-04T06:55:34,481 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4bae9eb3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4bae9eb3 2024-12-04T06:55:34,481 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-04T06:55:34,485 DEBUG [M:0;607fd5c6574c:36799 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fccb5044136c4bc9992beb622686c80d is 52, key is load_balancer_on/state:d/1733295267095/Put/seqid=0 2024-12-04T06:55:34,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741881_1057 (size=5056) 2024-12-04T06:55:34,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741881_1057 (size=5056) 2024-12-04T06:55:34,490 INFO [M:0;607fd5c6574c:36799 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fccb5044136c4bc9992beb622686c80d 2024-12-04T06:55:34,495 DEBUG [M:0;607fd5c6574c:36799 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7c07415911d3421ab8c538412bdab2c1 as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7c07415911d3421ab8c538412bdab2c1 2024-12-04T06:55:34,499 INFO [M:0;607fd5c6574c:36799 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7c07415911d3421ab8c538412bdab2c1, entries=8, sequenceid=125, filesize=5.5 K 2024-12-04T06:55:34,500 DEBUG [M:0;607fd5c6574c:36799 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/be32efebccce4ba48dd28078ea4a6d35 as 
hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/be32efebccce4ba48dd28078ea4a6d35 2024-12-04T06:55:34,505 INFO [M:0;607fd5c6574c:36799 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for be32efebccce4ba48dd28078ea4a6d35 2024-12-04T06:55:34,505 INFO [M:0;607fd5c6574c:36799 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/be32efebccce4ba48dd28078ea4a6d35, entries=13, sequenceid=125, filesize=6.9 K 2024-12-04T06:55:34,506 DEBUG [M:0;607fd5c6574c:36799 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/34fe1100bc4042a7b5d16d625d05e8ba as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/34fe1100bc4042a7b5d16d625d05e8ba 2024-12-04T06:55:34,510 INFO [M:0;607fd5c6574c:36799 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/34fe1100bc4042a7b5d16d625d05e8ba, entries=1, sequenceid=125, filesize=5.0 K 2024-12-04T06:55:34,511 DEBUG [M:0;607fd5c6574c:36799 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fccb5044136c4bc9992beb622686c80d as hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/fccb5044136c4bc9992beb622686c80d 2024-12-04T06:55:34,515 INFO [M:0;607fd5c6574c:36799 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41769/user/jenkins/test-data/f3497993-50f5-615f-61e3-673796a0b7d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/fccb5044136c4bc9992beb622686c80d, entries=1, sequenceid=125, filesize=4.9 K 2024-12-04T06:55:34,517 INFO [M:0;607fd5c6574c:36799 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.41 KB/52639, heapSize ~63.27 KB/64784, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 126ms, sequenceid=125, compaction requested=false 2024-12-04T06:55:34,518 INFO [M:0;607fd5c6574c:36799 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:55:34,519 DEBUG [M:0;607fd5c6574c:36799 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733295334390Disabling compacts and flushes for region at 1733295334390Disabling writes for close at 1733295334390Obtaining lock to block concurrent updates at 1733295334390Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733295334390Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52639, getHeapSize=64784, getOffHeapSize=0, getCellsCount=148 at 1733295334391 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733295334391Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733295334391Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733295334408 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733295334408Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733295334417 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733295334432 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733295334432Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733295334447 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733295334460 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733295334461 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733295334470 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733295334484 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733295334484Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@226ff382: reopening flushed file at 1733295334494 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7d32e70a: reopening flushed file at 1733295334500 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@40674b17: reopening flushed file at 1733295334505 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@603915c: reopening flushed file at 1733295334510 (+5 ms)Finished flush of dataSize ~51.41 KB/52639, heapSize ~63.27 KB/64784, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 126ms, sequenceid=125, compaction requested=false at 1733295334517 (+7 ms)Writing region close event to WAL at 1733295334518 (+1 ms)Closed at 1733295334518 2024-12-04T06:55:34,519 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:34,519 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:34,519 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:34,519 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:34,519 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:34,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43645 is added to blk_1073741830_1006 (size=61308) 2024-12-04T06:55:34,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35313 is added to blk_1073741830_1006 (size=61308) 2024-12-04T06:55:34,926 INFO [M:0;607fd5c6574c:36799 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-04T06:55:34,926 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-04T06:55:34,926 INFO [M:0;607fd5c6574c:36799 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36799 2024-12-04T06:55:34,926 INFO [M:0;607fd5c6574c:36799 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T06:55:35,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36799-0x1017c40c3ba0000, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T06:55:35,029 INFO [M:0;607fd5c6574c:36799 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-04T06:55:35,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36799-0x1017c40c3ba0000, quorum=127.0.0.1:60571, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T06:55:35,033 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2735da07{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:55:35,033 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3e770e70{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T06:55:35,033 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T06:55:35,033 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@284a8a01{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T06:55:35,033 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@250c37c4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/hadoop.log.dir/,STOPPED} 2024-12-04T06:55:35,035 WARN [BP-1932486448-172.17.0.2-1733295265189 heartbeating to localhost/127.0.0.1:41769 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T06:55:35,035 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-04T06:55:35,035 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T06:55:35,035 WARN [BP-1932486448-172.17.0.2-1733295265189 heartbeating to localhost/127.0.0.1:41769 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1932486448-172.17.0.2-1733295265189 (Datanode Uuid c135430b-62b1-46cb-8901-96a27938365a) service to localhost/127.0.0.1:41769 2024-12-04T06:55:35,036 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/cluster_6d60fc30-8bea-1b14-0596-45cf526269fc/data/data3/current/BP-1932486448-172.17.0.2-1733295265189 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:55:35,036 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/cluster_6d60fc30-8bea-1b14-0596-45cf526269fc/data/data4/current/BP-1932486448-172.17.0.2-1733295265189 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:55:35,037 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T06:55:35,043 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6f8f17a1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:55:35,044 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7da32d48{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T06:55:35,044 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T06:55:35,044 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@33382c80{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T06:55:35,044 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a8558d3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/hadoop.log.dir/,STOPPED} 2024-12-04T06:55:35,045 WARN [BP-1932486448-172.17.0.2-1733295265189 heartbeating to localhost/127.0.0.1:41769 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T06:55:35,045 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-04T06:55:35,045 WARN [BP-1932486448-172.17.0.2-1733295265189 heartbeating to localhost/127.0.0.1:41769 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1932486448-172.17.0.2-1733295265189 (Datanode Uuid a5cb09a8-5cf6-49d7-95c7-697f13452c95) service to localhost/127.0.0.1:41769 2024-12-04T06:55:35,045 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T06:55:35,046 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/cluster_6d60fc30-8bea-1b14-0596-45cf526269fc/data/data1/current/BP-1932486448-172.17.0.2-1733295265189 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:55:35,046 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/cluster_6d60fc30-8bea-1b14-0596-45cf526269fc/data/data2/current/BP-1932486448-172.17.0.2-1733295265189 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:55:35,046 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T06:55:35,053 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@282647c0{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-04T06:55:35,053 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3db1ef4a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T06:55:35,053 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T06:55:35,053 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@640c9e09{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T06:55:35,053 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@446d84f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/hadoop.log.dir/,STOPPED} 2024-12-04T06:55:35,061 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-04T06:55:35,089 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-04T06:55:35,101 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=228 (was 208) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41769 from jenkins.hfs.6 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41769 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:41769 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41769 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client 
(75657370) connection to localhost/127.0.0.1:41769 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41769 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41769 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41769 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=509 (was 485) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=251 (was 258), ProcessCount=11 (was 11), AvailableMemoryMB=5539 (was 5841) 2024-12-04T06:55:35,110 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=228, OpenFileDescriptor=509, MaxFileDescriptor=1048576, SystemLoadAverage=251, ProcessCount=11, AvailableMemoryMB=5539 2024-12-04T06:55:35,111 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-04T06:55:35,111 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/hadoop.log.dir so I do NOT create it in target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9 2024-12-04T06:55:35,111 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4eeb4a1c-c37c-1e02-1528-c4756447003d/hadoop.tmp.dir so I do NOT create it in target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9 2024-12-04T06:55:35,111 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/cluster_621cef8b-624b-8449-63fc-c819cdddd2fd, deleteOnExit=true 2024-12-04T06:55:35,111 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-04T06:55:35,111 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/test.cache.data in system properties and HBase conf 2024-12-04T06:55:35,111 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/hadoop.tmp.dir in system properties and HBase conf 2024-12-04T06:55:35,111 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/hadoop.log.dir in system properties and HBase conf 2024-12-04T06:55:35,111 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-04T06:55:35,111 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-04T06:55:35,111 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-04T06:55:35,111 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-12-04T06:55:35,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-04T06:55:35,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-04T06:55:35,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-04T06:55:35,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T06:55:35,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-04T06:55:35,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-04T06:55:35,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T06:55:35,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T06:55:35,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-04T06:55:35,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/nfs.dump.dir in system properties and HBase conf 2024-12-04T06:55:35,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/java.io.tmpdir in system properties and HBase conf 2024-12-04T06:55:35,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T06:55:35,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-04T06:55:35,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-04T06:55:35,126 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-04T06:55:35,162 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:35,194 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T06:55:35,199 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T06:55:35,208 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T06:55:35,208 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T06:55:35,208 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T06:55:35,209 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T06:55:35,209 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d7a0e2e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/hadoop.log.dir/,AVAILABLE} 2024-12-04T06:55:35,209 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@32d01bcd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T06:55:35,325 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5d175de5{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/java.io.tmpdir/jetty-localhost-41213-hadoop-hdfs-3_4_1-tests_jar-_-any-15896938553043589341/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-04T06:55:35,326 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@51af64d5{HTTP/1.1, (http/1.1)}{localhost:41213} 2024-12-04T06:55:35,326 INFO [Time-limited test {}] server.Server(415): Started @307851ms 2024-12-04T06:55:35,339 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-04T06:55:35,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:35,405 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T06:55:35,408 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T06:55:35,409 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T06:55:35,409 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T06:55:35,409 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-04T06:55:35,409 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24bb5ef7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/hadoop.log.dir/,AVAILABLE} 2024-12-04T06:55:35,410 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6c8a9439{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T06:55:35,545 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@483384c3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/java.io.tmpdir/jetty-localhost-34319-hadoop-hdfs-3_4_1-tests_jar-_-any-9235251732322587904/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:55:35,546 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3e70e237{HTTP/1.1, (http/1.1)}{localhost:34319} 2024-12-04T06:55:35,546 INFO [Time-limited test {}] server.Server(415): Started @308071ms 2024-12-04T06:55:35,547 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-04T06:55:35,581 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T06:55:35,583 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T06:55:35,584 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T06:55:35,584 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T06:55:35,584 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T06:55:35,585 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@238bf9b3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/hadoop.log.dir/,AVAILABLE} 2024-12-04T06:55:35,585 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@814e400{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T06:55:35,663 WARN [Thread-2453 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/cluster_621cef8b-624b-8449-63fc-c819cdddd2fd/data/data1/current/BP-116886128-172.17.0.2-1733295335132/current, will proceed with Du for space computation calculation, 2024-12-04T06:55:35,663 WARN [Thread-2454 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/cluster_621cef8b-624b-8449-63fc-c819cdddd2fd/data/data2/current/BP-116886128-172.17.0.2-1733295335132/current, will proceed with Du for space computation calculation, 2024-12-04T06:55:35,686 WARN [Thread-2432 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-04T06:55:35,689 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1f823755bde53c9d with lease ID 0xd3516a8069aa608c: Processing first storage report for DS-fd650180-a248-4b0e-81d3-c9a9e208bbf6 from datanode DatanodeRegistration(127.0.0.1:38767, datanodeUuid=a9d2a0c7-971a-4cd0-9d67-07f7cdb49567, infoPort=34811, infoSecurePort=0, ipcPort=33327, storageInfo=lv=-57;cid=testClusterID;nsid=1862272620;c=1733295335132) 2024-12-04T06:55:35,689 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1f823755bde53c9d with lease ID 0xd3516a8069aa608c: from storage DS-fd650180-a248-4b0e-81d3-c9a9e208bbf6 node DatanodeRegistration(127.0.0.1:38767, datanodeUuid=a9d2a0c7-971a-4cd0-9d67-07f7cdb49567, infoPort=34811, infoSecurePort=0, ipcPort=33327, storageInfo=lv=-57;cid=testClusterID;nsid=1862272620;c=1733295335132), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:55:35,689 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1f823755bde53c9d with lease ID 0xd3516a8069aa608c: Processing first storage report for DS-c6180cd7-9a4a-4453-b8ec-33cea26e2fc6 from datanode DatanodeRegistration(127.0.0.1:38767, datanodeUuid=a9d2a0c7-971a-4cd0-9d67-07f7cdb49567, infoPort=34811, infoSecurePort=0, ipcPort=33327, storageInfo=lv=-57;cid=testClusterID;nsid=1862272620;c=1733295335132) 2024-12-04T06:55:35,689 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1f823755bde53c9d with lease ID 0xd3516a8069aa608c: from storage DS-c6180cd7-9a4a-4453-b8ec-33cea26e2fc6 node DatanodeRegistration(127.0.0.1:38767, datanodeUuid=a9d2a0c7-971a-4cd0-9d67-07f7cdb49567, infoPort=34811, infoSecurePort=0, ipcPort=33327, storageInfo=lv=-57;cid=testClusterID;nsid=1862272620;c=1733295335132), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:55:35,720 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@372f7d77{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/java.io.tmpdir/jetty-localhost-45721-hadoop-hdfs-3_4_1-tests_jar-_-any-10782184814163581452/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:55:35,721 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3ffef2a8{HTTP/1.1, (http/1.1)}{localhost:45721} 2024-12-04T06:55:35,721 INFO [Time-limited test {}] server.Server(415): Started @308246ms 2024-12-04T06:55:35,722 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-04T06:55:35,817 WARN [Thread-2479 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/cluster_621cef8b-624b-8449-63fc-c819cdddd2fd/data/data3/current/BP-116886128-172.17.0.2-1733295335132/current, will proceed with Du for space computation calculation, 2024-12-04T06:55:35,817 WARN [Thread-2480 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/cluster_621cef8b-624b-8449-63fc-c819cdddd2fd/data/data4/current/BP-116886128-172.17.0.2-1733295335132/current, will proceed with Du for space computation calculation, 2024-12-04T06:55:35,837 WARN [Thread-2468 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-04T06:55:35,839 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xae5c2299f36242bc with lease ID 0xd3516a8069aa608d: Processing first storage report for DS-46c77a16-2c5a-464b-926d-d9d35def46ca from datanode DatanodeRegistration(127.0.0.1:42213, datanodeUuid=89e9b2f6-273b-4e08-8638-f2879e8ec936, infoPort=44181, infoSecurePort=0, ipcPort=42901, storageInfo=lv=-57;cid=testClusterID;nsid=1862272620;c=1733295335132) 2024-12-04T06:55:35,839 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xae5c2299f36242bc with lease ID 0xd3516a8069aa608d: from storage DS-46c77a16-2c5a-464b-926d-d9d35def46ca node DatanodeRegistration(127.0.0.1:42213, datanodeUuid=89e9b2f6-273b-4e08-8638-f2879e8ec936, infoPort=44181, infoSecurePort=0, ipcPort=42901, storageInfo=lv=-57;cid=testClusterID;nsid=1862272620;c=1733295335132), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:55:35,840 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xae5c2299f36242bc with lease ID 0xd3516a8069aa608d: Processing first storage report for DS-14f0c49d-8a3b-4943-97dd-17bacc253a67 from datanode DatanodeRegistration(127.0.0.1:42213, datanodeUuid=89e9b2f6-273b-4e08-8638-f2879e8ec936, infoPort=44181, infoSecurePort=0, ipcPort=42901, storageInfo=lv=-57;cid=testClusterID;nsid=1862272620;c=1733295335132) 2024-12-04T06:55:35,840 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xae5c2299f36242bc with lease ID 0xd3516a8069aa608d: from storage DS-14f0c49d-8a3b-4943-97dd-17bacc253a67 node DatanodeRegistration(127.0.0.1:42213, datanodeUuid=89e9b2f6-273b-4e08-8638-f2879e8ec936, infoPort=44181, infoSecurePort=0, ipcPort=42901, storageInfo=lv=-57;cid=testClusterID;nsid=1862272620;c=1733295335132), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T06:55:35,845 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9 2024-12-04T06:55:35,850 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/cluster_621cef8b-624b-8449-63fc-c819cdddd2fd/zookeeper_0, clientPort=57336, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/cluster_621cef8b-624b-8449-63fc-c819cdddd2fd/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/cluster_621cef8b-624b-8449-63fc-c819cdddd2fd/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-04T06:55:35,851 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57336 2024-12-04T06:55:35,851 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:55:35,852 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:55:35,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38767 is added to blk_1073741825_1001 (size=7) 2024-12-04T06:55:35,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42213 is added to blk_1073741825_1001 (size=7) 2024-12-04T06:55:35,872 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc with version=8 2024-12-04T06:55:35,872 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38117/user/jenkins/test-data/7804b847-b382-57c3-186a-a9bbeb9eaf95/hbase-staging 2024-12-04T06:55:35,874 INFO [Time-limited test {}] client.ConnectionUtils(128): master/607fd5c6574c:0 server-side Connection retries=45 2024-12-04T06:55:35,874 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T06:55:35,874 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T06:55:35,874 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T06:55:35,874 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T06:55:35,874 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T06:55:35,874 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-04T06:55:35,875 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T06:55:35,875 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46787 2024-12-04T06:55:35,876 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46787 connecting to ZooKeeper ensemble=127.0.0.1:57336 2024-12-04T06:55:35,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:467870x0, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T06:55:35,883 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46787-0x1017c41d4390000 connected 2024-12-04T06:55:35,912 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:55:35,914 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:55:35,916 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46787-0x1017c41d4390000, quorum=127.0.0.1:57336, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T06:55:35,916 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc, hbase.cluster.distributed=false 2024-12-04T06:55:35,918 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46787-0x1017c41d4390000, quorum=127.0.0.1:57336, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T06:55:35,919 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46787 2024-12-04T06:55:35,919 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46787 2024-12-04T06:55:35,920 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46787 2024-12-04T06:55:35,920 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46787 2024-12-04T06:55:35,920 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46787 2024-12-04T06:55:35,935 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/607fd5c6574c:0 server-side Connection retries=45 2024-12-04T06:55:35,935 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T06:55:35,935 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T06:55:35,935 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T06:55:35,935 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T06:55:35,935 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T06:55:35,936 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-04T06:55:35,936 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T06:55:35,936 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35753 2024-12-04T06:55:35,937 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35753 connecting to ZooKeeper ensemble=127.0.0.1:57336 2024-12-04T06:55:35,938 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:55:35,939 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:55:35,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:357530x0, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T06:55:35,944 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35753-0x1017c41d4390001 connected 2024-12-04T06:55:35,944 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35753-0x1017c41d4390001, quorum=127.0.0.1:57336, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T06:55:35,944 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-04T06:55:35,945 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-04T06:55:35,945 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35753-0x1017c41d4390001, quorum=127.0.0.1:57336, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-04T06:55:35,947 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35753-0x1017c41d4390001, quorum=127.0.0.1:57336, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T06:55:35,947 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35753 2024-12-04T06:55:35,947 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35753 2024-12-04T06:55:35,948 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35753 2024-12-04T06:55:35,952 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35753 2024-12-04T06:55:35,952 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35753 
2024-12-04T06:55:35,965 DEBUG [M:0;607fd5c6574c:46787 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;607fd5c6574c:46787 2024-12-04T06:55:35,965 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/607fd5c6574c,46787,1733295335874 2024-12-04T06:55:35,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35753-0x1017c41d4390001, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T06:55:35,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46787-0x1017c41d4390000, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T06:55:35,967 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46787-0x1017c41d4390000, quorum=127.0.0.1:57336, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/607fd5c6574c,46787,1733295335874 2024-12-04T06:55:35,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35753-0x1017c41d4390001, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-04T06:55:35,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46787-0x1017c41d4390000, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:55:35,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35753-0x1017c41d4390001, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:55:35,970 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46787-0x1017c41d4390000, quorum=127.0.0.1:57336, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-04T06:55:35,971 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/607fd5c6574c,46787,1733295335874 from backup master directory 2024-12-04T06:55:35,972 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46787-0x1017c41d4390000, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/607fd5c6574c,46787,1733295335874 2024-12-04T06:55:35,972 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46787-0x1017c41d4390000, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T06:55:35,972 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35753-0x1017c41d4390001, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T06:55:35,973 WARN [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-04T06:55:35,973 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=607fd5c6574c,46787,1733295335874 2024-12-04T06:55:35,976 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/hbase.id] with ID: 9b5617ce-d211-4738-81c6-a467f86f607e 2024-12-04T06:55:35,976 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/.tmp/hbase.id 2024-12-04T06:55:35,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42213 is added to blk_1073741826_1002 (size=42) 2024-12-04T06:55:35,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38767 is added to blk_1073741826_1002 (size=42) 2024-12-04T06:55:35,985 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/.tmp/hbase.id]:[hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/hbase.id] 2024-12-04T06:55:35,996 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:55:35,996 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-04T06:55:35,998 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-12-04T06:55:36,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35753-0x1017c41d4390001, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:55:36,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46787-0x1017c41d4390000, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:55:36,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38767 is added to blk_1073741827_1003 (size=196) 2024-12-04T06:55:36,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42213 is added to blk_1073741827_1003 (size=196) 2024-12-04T06:55:36,007 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T06:55:36,008 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-04T06:55:36,009 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T06:55:36,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38767 is added to blk_1073741828_1004 (size=1189) 2024-12-04T06:55:36,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42213 is added to blk_1073741828_1004 (size=1189) 2024-12-04T06:55:36,016 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/MasterData/data/master/store 2024-12-04T06:55:36,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38767 is added to blk_1073741829_1005 (size=34) 2024-12-04T06:55:36,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42213 is added to blk_1073741829_1005 (size=34) 2024-12-04T06:55:36,023 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:55:36,023 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-04T06:55:36,023 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:55:36,023 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:55:36,023 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-04T06:55:36,023 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:55:36,023 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-04T06:55:36,023 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733295336023Disabling compacts and flushes for region at 1733295336023Disabling writes for close at 1733295336023Writing region close event to WAL at 1733295336023Closed at 1733295336023 2024-12-04T06:55:36,023 WARN [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/MasterData/data/master/store/.initializing 2024-12-04T06:55:36,024 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/MasterData/WALs/607fd5c6574c,46787,1733295335874 2024-12-04T06:55:36,026 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=607fd5c6574c%2C46787%2C1733295335874, suffix=, logDir=hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/MasterData/WALs/607fd5c6574c,46787,1733295335874, archiveDir=hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/MasterData/oldWALs, maxLogs=10 2024-12-04T06:55:36,026 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C46787%2C1733295335874.1733295336026 2024-12-04T06:55:36,030 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/MasterData/WALs/607fd5c6574c,46787,1733295335874/607fd5c6574c%2C46787%2C1733295335874.1733295336026 2024-12-04T06:55:36,031 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44181:44181),(127.0.0.1/127.0.0.1:34811:34811)] 2024-12-04T06:55:36,032 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-04T06:55:36,032 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:55:36,032 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:55:36,032 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:55:36,034 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:55:36,035 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-04T06:55:36,035 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:55:36,036 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:55:36,036 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:55:36,037 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-04T06:55:36,037 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:55:36,037 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T06:55:36,037 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:55:36,038 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-04T06:55:36,038 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:55:36,039 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T06:55:36,039 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:55:36,040 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-04T06:55:36,040 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:55:36,040 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T06:55:36,040 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:55:36,041 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:55:36,041 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:55:36,042 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:55:36,042 DEBUG [master/607fd5c6574c:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:55:36,043 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-04T06:55:36,044 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T06:55:36,046 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T06:55:36,046 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=743799, jitterRate=-0.05421076714992523}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-04T06:55:36,047 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733295336032Initializing all the Stores at 1733295336033 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295336033Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295336033Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295336033Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295336033Cleaning up temporary data from old regions at 1733295336042 (+9 ms)Region opened successfully at 1733295336047 (+5 ms) 2024-12-04T06:55:36,047 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-04T06:55:36,051 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f2b6dab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=607fd5c6574c/172.17.0.2:0 2024-12-04T06:55:36,051 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-04T06:55:36,052 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-04T06:55:36,052 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-04T06:55:36,052 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-04T06:55:36,052 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-04T06:55:36,052 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-04T06:55:36,052 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-04T06:55:36,058 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-04T06:55:36,059 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46787-0x1017c41d4390000, quorum=127.0.0.1:57336, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-04T06:55:36,060 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-04T06:55:36,061 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-04T06:55:36,061 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46787-0x1017c41d4390000, quorum=127.0.0.1:57336, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-04T06:55:36,064 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-04T06:55:36,064 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-04T06:55:36,065 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46787-0x1017c41d4390000, quorum=127.0.0.1:57336, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-04T06:55:36,066 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-04T06:55:36,067 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46787-0x1017c41d4390000, quorum=127.0.0.1:57336, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-04T06:55:36,068 DEBUG 
[master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-04T06:55:36,070 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46787-0x1017c41d4390000, quorum=127.0.0.1:57336, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-04T06:55:36,071 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-04T06:55:36,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35753-0x1017c41d4390001, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T06:55:36,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46787-0x1017c41d4390000, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T06:55:36,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35753-0x1017c41d4390001, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:55:36,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46787-0x1017c41d4390000, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:55:36,075 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=607fd5c6574c,46787,1733295335874, sessionid=0x1017c41d4390000, setting cluster-up flag (Was=false) 2024-12-04T06:55:36,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46787-0x1017c41d4390000, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:55:36,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35753-0x1017c41d4390001, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:55:36,083 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-04T06:55:36,084 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=607fd5c6574c,46787,1733295335874 2024-12-04T06:55:36,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35753-0x1017c41d4390001, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:55:36,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46787-0x1017c41d4390000, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:55:36,094 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-04T06:55:36,095 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=607fd5c6574c,46787,1733295335874 2024-12-04T06:55:36,096 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-04T06:55:36,098 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-04T06:55:36,098 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-04T06:55:36,098 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-04T06:55:36,098 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 607fd5c6574c,46787,1733295335874 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-04T06:55:36,099 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/607fd5c6574c:0, corePoolSize=5, maxPoolSize=5 2024-12-04T06:55:36,099 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/607fd5c6574c:0, corePoolSize=5, maxPoolSize=5 2024-12-04T06:55:36,100 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/607fd5c6574c:0, corePoolSize=5, maxPoolSize=5 2024-12-04T06:55:36,100 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/607fd5c6574c:0, corePoolSize=5, maxPoolSize=5 2024-12-04T06:55:36,100 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/607fd5c6574c:0, corePoolSize=10, maxPoolSize=10 2024-12-04T06:55:36,100 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:55:36,100 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/607fd5c6574c:0, corePoolSize=2, maxPoolSize=2 2024-12-04T06:55:36,100 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/607fd5c6574c:0, corePoolSize=1, 
maxPoolSize=1 2024-12-04T06:55:36,101 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733295366100 2024-12-04T06:55:36,101 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-04T06:55:36,101 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-04T06:55:36,101 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-04T06:55:36,101 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-04T06:55:36,101 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-04T06:55:36,101 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-04T06:55:36,101 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-04T06:55:36,101 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T06:55:36,101 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-04T06:55:36,101 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-04T06:55:36,102 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-04T06:55:36,102 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-04T06:55:36,102 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-04T06:55:36,102 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-04T06:55:36,102 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.large.0-1733295336102,5,FailOnTimeoutGroup] 2024-12-04T06:55:36,102 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:55:36,102 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.small.0-1733295336102,5,FailOnTimeoutGroup] 2024-12-04T06:55:36,102 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-04T06:55:36,102 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-04T06:55:36,102 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-04T06:55:36,103 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-04T06:55:36,102 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-04T06:55:36,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42213 is added to blk_1073741831_1007 (size=1321) 2024-12-04T06:55:36,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38767 is added to blk_1073741831_1007 (size=1321) 2024-12-04T06:55:36,110 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-04T06:55:36,111 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc 2024-12-04T06:55:36,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38767 is added to blk_1073741832_1008 (size=32) 2024-12-04T06:55:36,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42213 is added to blk_1073741832_1008 (size=32) 2024-12-04T06:55:36,117 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:55:36,118 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T06:55:36,119 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T06:55:36,119 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:55:36,120 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:55:36,120 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 
1588230740 2024-12-04T06:55:36,121 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-04T06:55:36,121 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:55:36,122 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:55:36,122 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T06:55:36,123 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T06:55:36,123 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:55:36,123 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:55:36,123 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T06:55:36,124 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-04T06:55:36,125 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:55:36,125 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:55:36,125 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-04T06:55:36,126 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/data/hbase/meta/1588230740 2024-12-04T06:55:36,126 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/data/hbase/meta/1588230740 2024-12-04T06:55:36,127 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-04T06:55:36,127 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-04T06:55:36,128 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
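
The table descriptor printed above for hbase:meta lists each column family with VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING, IN_MEMORY and BLOCKSIZE attributes. A minimal sketch of building a family with the same attributes as the 'info' family through the public client API is below; the table name is hypothetical and this is not the actual bootstrap code path used by InitMetaProcedure.

    // Sketch: a column family mirroring the logged 'info' attributes
    // (VERSIONS=3, BLOOMFILTER=ROWCOL, ROW_INDEX_V1 encoding, IN_MEMORY, 8 KB blocks).
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
      public static TableDescriptor build() {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example:meta_like")) // hypothetical table name
            .setColumnFamily(info)
            .build();
      }
    }
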
2024-12-04T06:55:36,129 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-04T06:55:36,131 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T06:55:36,131 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=831636, jitterRate=0.057480588555336}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T06:55:36,132 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733295336117Initializing all the Stores at 1733295336118 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295336118Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295336118Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295336118Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295336118Cleaning up temporary data from old regions at 1733295336127 (+9 ms)Region opened successfully at 1733295336132 (+5 ms) 2024-12-04T06:55:36,132 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-04T06:55:36,132 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-04T06:55:36,132 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-04T06:55:36,132 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-04T06:55:36,132 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-04T06:55:36,132 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-04T06:55:36,132 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733295336132Disabling compacts and flushes for region at 1733295336132Disabling writes for close at 1733295336132Writing region close 
event to WAL at 1733295336132Closed at 1733295336132 2024-12-04T06:55:36,133 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T06:55:36,133 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-04T06:55:36,133 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-04T06:55:36,135 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-04T06:55:36,136 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-04T06:55:36,154 INFO [RS:0;607fd5c6574c:35753 {}] regionserver.HRegionServer(746): ClusterId : 9b5617ce-d211-4738-81c6-a467f86f607e 2024-12-04T06:55:36,154 DEBUG [RS:0;607fd5c6574c:35753 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-04T06:55:36,156 DEBUG [RS:0;607fd5c6574c:35753 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-04T06:55:36,156 DEBUG [RS:0;607fd5c6574c:35753 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-04T06:55:36,160 DEBUG [RS:0;607fd5c6574c:35753 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-04T06:55:36,160 DEBUG [RS:0;607fd5c6574c:35753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10e57389, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=607fd5c6574c/172.17.0.2:0 2024-12-04T06:55:36,162 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:36,173 DEBUG [RS:0;607fd5c6574c:35753 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;607fd5c6574c:35753 2024-12-04T06:55:36,173 INFO [RS:0;607fd5c6574c:35753 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-04T06:55:36,173 INFO [RS:0;607fd5c6574c:35753 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-04T06:55:36,173 DEBUG [RS:0;607fd5c6574c:35753 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-04T06:55:36,173 INFO [RS:0;607fd5c6574c:35753 {}] regionserver.HRegionServer(2659): reportForDuty to master=607fd5c6574c,46787,1733295335874 with port=35753, startcode=1733295335935 2024-12-04T06:55:36,174 DEBUG [RS:0;607fd5c6574c:35753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-04T06:55:36,176 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35029, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-12-04T06:55:36,176 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46787 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 607fd5c6574c,35753,1733295335935 2024-12-04T06:55:36,176 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46787 {}] master.ServerManager(517): Registering regionserver=607fd5c6574c,35753,1733295335935 2024-12-04T06:55:36,178 DEBUG [RS:0;607fd5c6574c:35753 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc 2024-12-04T06:55:36,178 DEBUG [RS:0;607fd5c6574c:35753 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39191 2024-12-04T06:55:36,178 DEBUG [RS:0;607fd5c6574c:35753 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-04T06:55:36,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46787-0x1017c41d4390000, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T06:55:36,180 DEBUG [RS:0;607fd5c6574c:35753 {}] zookeeper.ZKUtil(111): regionserver:35753-0x1017c41d4390001, quorum=127.0.0.1:57336, 
baseZNode=/hbase Set watcher on existing znode=/hbase/rs/607fd5c6574c,35753,1733295335935 2024-12-04T06:55:36,180 WARN [RS:0;607fd5c6574c:35753 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-04T06:55:36,180 INFO [RS:0;607fd5c6574c:35753 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T06:55:36,180 DEBUG [RS:0;607fd5c6574c:35753 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/WALs/607fd5c6574c,35753,1733295335935 2024-12-04T06:55:36,180 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [607fd5c6574c,35753,1733295335935] 2024-12-04T06:55:36,183 INFO [RS:0;607fd5c6574c:35753 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-04T06:55:36,185 INFO [RS:0;607fd5c6574c:35753 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-04T06:55:36,186 INFO [RS:0;607fd5c6574c:35753 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-04T06:55:36,186 INFO [RS:0;607fd5c6574c:35753 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T06:55:36,186 INFO [RS:0;607fd5c6574c:35753 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-04T06:55:36,187 INFO [RS:0;607fd5c6574c:35753 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-04T06:55:36,187 INFO [RS:0;607fd5c6574c:35753 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
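
The registration exchange above (region server sets a watcher on its znode under /hbase/rs, master's RegionServerTracker reacts to the NodeChildrenChanged event on /hbase/rs) follows the standard ZooKeeper ephemeral-node pattern. A minimal sketch with the raw ZooKeeper client is below; the connect string, znode name and session timeout are illustrative, and HBase itself goes through ZKWatcher/ZKUtil rather than this client directly.

    // Sketch: ephemeral member registration plus a children watch, as in the log above.
    import java.util.List;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class RsRegistrationSketch {
      public static void main(String[] args) throws Exception {
        // Quorum mirrors the log; 30s session timeout is illustrative.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:57336", 30_000, event -> { });

        // Server side: an ephemeral member node that vanishes if the session dies.
        String member = "/hbase/rs/example-host,35753,1733295335935"; // illustrative znode name
        zk.create(member, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

        // Master side: list members and leave a one-shot watch; a real tracker re-reads
        // the children and re-arms the watch on every NodeChildrenChanged event.
        List<String> servers = zk.getChildren("/hbase/rs",
            event -> System.out.println("children changed: " + event.getPath()));
        System.out.println("live region servers: " + servers);
      }
    }
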
2024-12-04T06:55:36,187 DEBUG [RS:0;607fd5c6574c:35753 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:55:36,187 DEBUG [RS:0;607fd5c6574c:35753 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:55:36,187 DEBUG [RS:0;607fd5c6574c:35753 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:55:36,187 DEBUG [RS:0;607fd5c6574c:35753 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:55:36,187 DEBUG [RS:0;607fd5c6574c:35753 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:55:36,187 DEBUG [RS:0;607fd5c6574c:35753 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/607fd5c6574c:0, corePoolSize=2, maxPoolSize=2 2024-12-04T06:55:36,187 DEBUG [RS:0;607fd5c6574c:35753 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:55:36,187 DEBUG [RS:0;607fd5c6574c:35753 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:55:36,187 DEBUG [RS:0;607fd5c6574c:35753 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:55:36,187 DEBUG [RS:0;607fd5c6574c:35753 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:55:36,187 DEBUG [RS:0;607fd5c6574c:35753 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:55:36,187 DEBUG [RS:0;607fd5c6574c:35753 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/607fd5c6574c:0, corePoolSize=1, maxPoolSize=1 2024-12-04T06:55:36,187 DEBUG [RS:0;607fd5c6574c:35753 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/607fd5c6574c:0, corePoolSize=3, maxPoolSize=3 2024-12-04T06:55:36,187 DEBUG [RS:0;607fd5c6574c:35753 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/607fd5c6574c:0, corePoolSize=3, maxPoolSize=3 2024-12-04T06:55:36,188 INFO [RS:0;607fd5c6574c:35753 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T06:55:36,188 INFO [RS:0;607fd5c6574c:35753 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T06:55:36,188 INFO [RS:0;607fd5c6574c:35753 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T06:55:36,188 INFO [RS:0;607fd5c6574c:35753 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
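
Each "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" entry above means a task has been handed to the ChoreService to run at that fixed period. In plain java.util.concurrent terms the scheduling is equivalent to the sketch below; the task body is a placeholder, not what CompactionChecker or MemstoreFlusherChore actually do.

    // Sketch: the fixed-period scheduling implied by "period=1000, unit=MILLISECONDS".
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreSketch {
      public static void main(String[] args) {
        ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);
        // Comparable to CompactionChecker / MemstoreFlusherChore: run every 1000 ms.
        pool.scheduleAtFixedRate(
            () -> System.out.println("periodic check"),
            0, 1000, TimeUnit.MILLISECONDS);
      }
    }
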
2024-12-04T06:55:36,188 INFO [RS:0;607fd5c6574c:35753 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-04T06:55:36,188 INFO [RS:0;607fd5c6574c:35753 {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,35753,1733295335935-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T06:55:36,204 INFO [RS:0;607fd5c6574c:35753 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-04T06:55:36,204 INFO [RS:0;607fd5c6574c:35753 {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,35753,1733295335935-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T06:55:36,204 INFO [RS:0;607fd5c6574c:35753 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T06:55:36,204 INFO [RS:0;607fd5c6574c:35753 {}] regionserver.Replication(171): 607fd5c6574c,35753,1733295335935 started 2024-12-04T06:55:36,218 INFO [RS:0;607fd5c6574c:35753 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T06:55:36,218 INFO [RS:0;607fd5c6574c:35753 {}] regionserver.HRegionServer(1482): Serving as 607fd5c6574c,35753,1733295335935, RpcServer on 607fd5c6574c/172.17.0.2:35753, sessionid=0x1017c41d4390001 2024-12-04T06:55:36,218 DEBUG [RS:0;607fd5c6574c:35753 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-04T06:55:36,218 DEBUG [RS:0;607fd5c6574c:35753 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 607fd5c6574c,35753,1733295335935 2024-12-04T06:55:36,218 DEBUG [RS:0;607fd5c6574c:35753 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '607fd5c6574c,35753,1733295335935' 2024-12-04T06:55:36,218 DEBUG [RS:0;607fd5c6574c:35753 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-04T06:55:36,219 DEBUG [RS:0;607fd5c6574c:35753 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-04T06:55:36,219 DEBUG [RS:0;607fd5c6574c:35753 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-04T06:55:36,219 DEBUG [RS:0;607fd5c6574c:35753 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-04T06:55:36,219 DEBUG [RS:0;607fd5c6574c:35753 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 607fd5c6574c,35753,1733295335935 2024-12-04T06:55:36,219 DEBUG [RS:0;607fd5c6574c:35753 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '607fd5c6574c,35753,1733295335935' 2024-12-04T06:55:36,219 DEBUG [RS:0;607fd5c6574c:35753 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-04T06:55:36,220 DEBUG [RS:0;607fd5c6574c:35753 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-04T06:55:36,220 DEBUG [RS:0;607fd5c6574c:35753 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-04T06:55:36,220 INFO [RS:0;607fd5c6574c:35753 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-04T06:55:36,220 INFO [RS:0;607fd5c6574c:35753 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-12-04T06:55:36,286 WARN [607fd5c6574c:46787 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-04T06:55:36,322 INFO [RS:0;607fd5c6574c:35753 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=607fd5c6574c%2C35753%2C1733295335935, suffix=, logDir=hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/WALs/607fd5c6574c,35753,1733295335935, archiveDir=hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/oldWALs, maxLogs=32 2024-12-04T06:55:36,322 INFO [RS:0;607fd5c6574c:35753 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C35753%2C1733295335935.1733295336322 2024-12-04T06:55:36,336 INFO [RS:0;607fd5c6574c:35753 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/WALs/607fd5c6574c,35753,1733295335935/607fd5c6574c%2C35753%2C1733295335935.1733295336322 2024-12-04T06:55:36,336 DEBUG [RS:0;607fd5c6574c:35753 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34811:34811),(127.0.0.1/127.0.0.1:44181:44181)] 2024-12-04T06:55:36,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T06:55:36,536 DEBUG [607fd5c6574c:46787 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-04T06:55:36,537 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=607fd5c6574c,35753,1733295335935 2024-12-04T06:55:36,538 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 607fd5c6574c,35753,1733295335935, state=OPENING 2024-12-04T06:55:36,539 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-04T06:55:36,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46787-0x1017c41d4390000, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:55:36,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35753-0x1017c41d4390001, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:55:36,542 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-04T06:55:36,542 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=607fd5c6574c,35753,1733295335935}] 2024-12-04T06:55:36,542 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T06:55:36,542 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T06:55:36,693 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-04T06:55:36,695 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48463, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-04T06:55:36,699 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-04T06:55:36,699 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T06:55:36,701 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=607fd5c6574c%2C35753%2C1733295335935.meta, suffix=.meta, logDir=hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/WALs/607fd5c6574c,35753,1733295335935, archiveDir=hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/oldWALs, maxLogs=32 2024-12-04T06:55:36,701 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 607fd5c6574c%2C35753%2C1733295335935.meta.1733295336701.meta 2024-12-04T06:55:36,709 INFO 
[RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/WALs/607fd5c6574c,35753,1733295335935/607fd5c6574c%2C35753%2C1733295335935.meta.1733295336701.meta 2024-12-04T06:55:36,710 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34811:34811),(127.0.0.1/127.0.0.1:44181:44181)] 2024-12-04T06:55:36,711 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-04T06:55:36,712 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-04T06:55:36,712 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-04T06:55:36,712 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-04T06:55:36,712 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-04T06:55:36,712 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T06:55:36,712 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-04T06:55:36,712 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-04T06:55:36,714 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T06:55:36,714 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T06:55:36,714 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:55:36,715 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:55:36,715 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-04T06:55:36,716 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-04T06:55:36,716 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:55:36,716 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:55:36,716 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T06:55:36,717 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T06:55:36,717 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:55:36,717 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:55:36,717 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T06:55:36,718 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-04T06:55:36,718 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T06:55:36,718 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T06:55:36,718 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-04T06:55:36,719 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/data/hbase/meta/1588230740 2024-12-04T06:55:36,720 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/data/hbase/meta/1588230740 2024-12-04T06:55:36,721 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-04T06:55:36,722 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-04T06:55:36,722 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
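
The CompactionConfiguration line repeated for every store above encodes the per-family compaction policy: minFilesToCompact=3, maxFilesToCompact=10, ratio=1.2, minCompactSize=128 MB, throttle point 2684354560 bytes. A minimal sketch of setting those values is below; the property names are assumptions based on the commonly used hbase-site.xml compaction keys and should be verified before relying on them.

    // Sketch only: compaction tuning matching the logged CompactionConfiguration values.
    // Property names are assumed, not taken from this log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);        // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);       // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
        conf.setLong("hbase.regionserver.thread.compaction.throttle", 2_684_354_560L);
        return conf;
      }
    }
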
2024-12-04T06:55:36,723 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-04T06:55:36,724 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=757397, jitterRate=-0.03692008554935455}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T06:55:36,724 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-04T06:55:36,725 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733295336712Writing region info on filesystem at 1733295336712Initializing all the Stores at 1733295336713 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295336713Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295336713Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733295336713Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733295336713Cleaning up temporary data from old regions at 1733295336722 (+9 ms)Running coprocessor post-open hooks at 1733295336724 (+2 ms)Region opened successfully at 1733295336725 (+1 ms) 2024-12-04T06:55:36,726 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733295336693 2024-12-04T06:55:36,728 DEBUG [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-04T06:55:36,728 INFO [RS_OPEN_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-04T06:55:36,729 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=607fd5c6574c,35753,1733295335935 2024-12-04T06:55:36,730 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 607fd5c6574c,35753,1733295335935, state=OPEN 2024-12-04T06:55:36,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46787-0x1017c41d4390000, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T06:55:36,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35753-0x1017c41d4390001, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T06:55:36,736 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=607fd5c6574c,35753,1733295335935 2024-12-04T06:55:36,736 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T06:55:36,736 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T06:55:36,738 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-04T06:55:36,738 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=607fd5c6574c,35753,1733295335935 in 194 msec 2024-12-04T06:55:36,741 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-04T06:55:36,741 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 605 msec 2024-12-04T06:55:36,742 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T06:55:36,742 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-04T06:55:36,743 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-04T06:55:36,743 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=607fd5c6574c,35753,1733295335935, seqNum=-1] 2024-12-04T06:55:36,744 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T06:55:36,745 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47071, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T06:55:36,750 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 652 msec 2024-12-04T06:55:36,750 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733295336750, completionTime=-1 2024-12-04T06:55:36,750 INFO 
[master/607fd5c6574c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-04T06:55:36,750 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-04T06:55:36,751 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-04T06:55:36,751 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733295396751 2024-12-04T06:55:36,751 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733295456751 2024-12-04T06:55:36,751 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-04T06:55:36,752 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,46787,1733295335874-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T06:55:36,752 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,46787,1733295335874-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T06:55:36,752 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,46787,1733295335874-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T06:55:36,752 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-607fd5c6574c:46787, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T06:55:36,752 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-04T06:55:36,752 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-04T06:55:36,754 DEBUG [master/607fd5c6574c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-04T06:55:36,756 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.783sec 2024-12-04T06:55:36,756 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-04T06:55:36,756 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-04T06:55:36,756 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-04T06:55:36,756 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
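
Once the master reports initialization complete, the surrounding entries show a client going through the usual handshake: cluster-id lookup via the connection registry, meta region location fetch, then ClientService calls. A minimal sketch of the same sequence against the public client API is below; the table and row names are illustrative.

    // Sketch: creating a connection (which performs the registry/cluster-id and
    // meta-location lookups logged above) and issuing a simple read.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ClientLookupSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // picks up the cluster's ZooKeeper quorum
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("example_table"))) { // hypothetical table
          Result r = table.get(new Get(Bytes.toBytes("example_row")));           // hypothetical row
          System.out.println("cells returned: " + r.size());
        }
      }
    }
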
2024-12-04T06:55:36,756 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-04T06:55:36,756 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,46787,1733295335874-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T06:55:36,756 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,46787,1733295335874-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-04T06:55:36,758 DEBUG [master/607fd5c6574c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-04T06:55:36,758 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-04T06:55:36,758 INFO [master/607fd5c6574c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=607fd5c6574c,46787,1733295335874-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T06:55:36,855 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@734134af, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T06:55:36,855 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 607fd5c6574c,46787,-1 for getting cluster id 2024-12-04T06:55:36,855 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-04T06:55:36,857 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '9b5617ce-d211-4738-81c6-a467f86f607e' 2024-12-04T06:55:36,857 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-04T06:55:36,857 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "9b5617ce-d211-4738-81c6-a467f86f607e" 2024-12-04T06:55:36,857 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cfe972c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T06:55:36,857 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [607fd5c6574c,46787,-1] 2024-12-04T06:55:36,858 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-04T06:55:36,858 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:55:36,859 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53560, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-04T06:55:36,860 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@fa9d1ff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T06:55:36,860 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-04T06:55:36,861 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=607fd5c6574c,35753,1733295335935, seqNum=-1] 2024-12-04T06:55:36,861 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T06:55:36,862 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34132, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T06:55:36,864 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=607fd5c6574c,46787,1733295335874 2024-12-04T06:55:36,864 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T06:55:36,867 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-04T06:55:36,867 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T06:55:36,869 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/WALs/test.com,8080,1, archiveDir=hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/oldWALs, maxLogs=32 2024-12-04T06:55:36,869 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733295336869 2024-12-04T06:55:36,874 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/WALs/test.com,8080,1/test.com%2C8080%2C1.1733295336869 2024-12-04T06:55:36,875 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44181:44181),(127.0.0.1/127.0.0.1:34811:34811)] 2024-12-04T06:55:36,876 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733295336876 2024-12-04T06:55:36,881 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:36,881 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:36,882 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:36,882 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:36,882 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:36,882 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/WALs/test.com,8080,1/test.com%2C8080%2C1.1733295336869 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/WALs/test.com,8080,1/test.com%2C8080%2C1.1733295336876 2024-12-04T06:55:36,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38767 is added to blk_1073741835_1011 (size=93) 2024-12-04T06:55:36,884 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42213 is added to blk_1073741835_1011 (size=93) 2024-12-04T06:55:36,885 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34811:34811),(127.0.0.1/127.0.0.1:44181:44181)] 2024-12-04T06:55:36,885 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/WALs/test.com,8080,1/test.com%2C8080%2C1.1733295336869 to hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/oldWALs/test.com%2C8080%2C1.1733295336869 2024-12-04T06:55:36,886 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:36,886 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:36,886 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:36,886 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:36,886 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:36,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42213 is added to blk_1073741836_1012 (size=93) 2024-12-04T06:55:36,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38767 is added to blk_1073741836_1012 (size=93) 2024-12-04T06:55:36,890 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/oldWALs 2024-12-04T06:55:36,890 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1733295336876) 2024-12-04T06:55:36,890 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-04T06:55:36,890 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
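[editor's note] The preceding entries show a WAL roll: a new writer test.com%2C8080%2C1.1733295336876 is created, the empty predecessor is archived into .../oldWALs, and the FSHLog is then closed. One way to confirm that archiving from outside the server is to list the WALs and oldWALs directories with the stock Hadoop FileSystem API, sketched below; the NameNode URI and directory layout are simply the ones printed in this run and are placeholders for any other deployment.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListArchivedWals {
  public static void main(String[] args) throws Exception {
    // NameNode and test-data root as they appear in this log; adjust for a real cluster.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:39191"), new Configuration());
    Path root = new Path("/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc");
    for (String dir : new String[] { "WALs/test.com,8080,1", "oldWALs" }) {
      Path p = new Path(root, dir);
      System.out.println(dir + ":");
      if (fs.exists(p)) {
        for (FileStatus st : fs.listStatus(p)) {
          System.out.println("  " + st.getPath().getName() + " (" + st.getLen() + " bytes)");
        }
      }
    }
    fs.close();
  }
}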
2024-12-04T06:55:36,890 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T06:55:36,890 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:55:36,890 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:55:36,890 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
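[editor's note] The call stack above is the expected close path during teardown: AbstractTestLogRolling.tearDown() calls HBaseTestingUtil.shutdownMiniCluster(), which closes the shared async connection before stopping the cluster. A minimal test skeleton following the same lifecycle looks roughly like the sketch below (JUnit 4, matching the runner in the trace); apart from HBaseTestingUtil's start/shutdown calls, which this log itself exercises, the class and method names are illustrative only.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class ExampleLogRollingTest {
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Brings up ZooKeeper, HDFS and one master plus one region server, as in this log.
    testUtil.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    // The path shown in the call stack above: close the shared connection, stop the cluster.
    testUtil.shutdownMiniCluster();
  }

  @Test
  public void clusterLifecycle() {
    // A real test would create a table or roll WALs here; this sketch only shows the lifecycle.
  }
}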
2024-12-04T06:55:36,890 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-04T06:55:36,890 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=34761756, stopped=false 2024-12-04T06:55:36,890 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=607fd5c6574c,46787,1733295335874 2024-12-04T06:55:36,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35753-0x1017c41d4390001, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T06:55:36,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35753-0x1017c41d4390001, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:55:36,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46787-0x1017c41d4390000, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T06:55:36,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46787-0x1017c41d4390000, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:55:36,892 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-04T06:55:36,892 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-04T06:55:36,892 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T06:55:36,892 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:55:36,893 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '607fd5c6574c,35753,1733295335935' ***** 2024-12-04T06:55:36,893 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-04T06:55:36,893 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35753-0x1017c41d4390001, quorum=127.0.0.1:57336, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T06:55:36,893 INFO [RS:0;607fd5c6574c:35753 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-04T06:55:36,893 INFO [RS:0;607fd5c6574c:35753 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-04T06:55:36,893 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-04T06:55:36,893 INFO [RS:0;607fd5c6574c:35753 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-04T06:55:36,893 INFO [RS:0;607fd5c6574c:35753 {}] regionserver.HRegionServer(959): stopping server 607fd5c6574c,35753,1733295335935 2024-12-04T06:55:36,893 INFO [RS:0;607fd5c6574c:35753 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T06:55:36,893 INFO [RS:0;607fd5c6574c:35753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;607fd5c6574c:35753. 
2024-12-04T06:55:36,893 DEBUG [RS:0;607fd5c6574c:35753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T06:55:36,893 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46787-0x1017c41d4390000, quorum=127.0.0.1:57336, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T06:55:36,893 DEBUG [RS:0;607fd5c6574c:35753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:55:36,893 INFO [RS:0;607fd5c6574c:35753 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-04T06:55:36,893 INFO [RS:0;607fd5c6574c:35753 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-04T06:55:36,893 INFO [RS:0;607fd5c6574c:35753 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-04T06:55:36,893 INFO [RS:0;607fd5c6574c:35753 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-04T06:55:36,894 INFO [RS:0;607fd5c6574c:35753 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-04T06:55:36,894 DEBUG [RS:0;607fd5c6574c:35753 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-04T06:55:36,894 DEBUG [RS:0;607fd5c6574c:35753 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-04T06:55:36,894 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-04T06:55:36,894 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-04T06:55:36,894 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-04T06:55:36,894 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-04T06:55:36,894 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-04T06:55:36,894 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-04T06:55:36,915 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/data/hbase/meta/1588230740/.tmp/ns/437d4abac1c84188bbeba99b4a398332 is 43, key is default/ns:d/1733295336745/Put/seqid=0 2024-12-04T06:55:36,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42213 is added to blk_1073741837_1013 (size=5153) 2024-12-04T06:55:36,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38767 is added to blk_1073741837_1013 (size=5153) 2024-12-04T06:55:36,920 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/data/hbase/meta/1588230740/.tmp/ns/437d4abac1c84188bbeba99b4a398332 2024-12-04T06:55:36,925 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/data/hbase/meta/1588230740/.tmp/ns/437d4abac1c84188bbeba99b4a398332 as hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/data/hbase/meta/1588230740/ns/437d4abac1c84188bbeba99b4a398332 2024-12-04T06:55:36,931 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/data/hbase/meta/1588230740/ns/437d4abac1c84188bbeba99b4a398332, entries=2, sequenceid=6, filesize=5.0 K 2024-12-04T06:55:36,932 INFO 
[RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 38ms, sequenceid=6, compaction requested=false 2024-12-04T06:55:36,936 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-04T06:55:36,936 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-04T06:55:36,936 INFO [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-04T06:55:36,936 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733295336894Running coprocessor pre-close hooks at 1733295336894Disabling compacts and flushes for region at 1733295336894Disabling writes for close at 1733295336894Obtaining lock to block concurrent updates at 1733295336894Preparing flush snapshotting stores in 1588230740 at 1733295336894Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733295336894Flushing stores of hbase:meta,,1.1588230740 at 1733295336895 (+1 ms)Flushing 1588230740/ns: creating writer at 1733295336895Flushing 1588230740/ns: appending metadata at 1733295336914 (+19 ms)Flushing 1588230740/ns: closing flushed file at 1733295336914Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@370c57b8: reopening flushed file at 1733295336925 (+11 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 38ms, sequenceid=6, compaction requested=false at 1733295336932 (+7 ms)Writing region close event to WAL at 1733295336932Running coprocessor post-close hooks at 1733295336936 (+4 ms)Closed at 1733295336936 2024-12-04T06:55:36,936 DEBUG [RS_CLOSE_META-regionserver/607fd5c6574c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-04T06:55:37,094 INFO [RS:0;607fd5c6574c:35753 {}] regionserver.HRegionServer(976): stopping server 607fd5c6574c,35753,1733295335935; all regions closed. 
2024-12-04T06:55:37,094 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:37,095 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:37,095 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:37,095 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:37,095 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:37,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42213 is added to blk_1073741834_1010 (size=1152) 2024-12-04T06:55:37,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38767 is added to blk_1073741834_1010 (size=1152) 2024-12-04T06:55:37,099 DEBUG [RS:0;607fd5c6574c:35753 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/oldWALs 2024-12-04T06:55:37,099 INFO [RS:0;607fd5c6574c:35753 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 607fd5c6574c%2C35753%2C1733295335935.meta:.meta(num 1733295336701) 2024-12-04T06:55:37,100 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:37,100 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:37,100 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:37,100 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:37,100 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:37,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42213 is added to blk_1073741833_1009 (size=93) 2024-12-04T06:55:37,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38767 is added to blk_1073741833_1009 (size=93) 2024-12-04T06:55:37,104 DEBUG [RS:0;607fd5c6574c:35753 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/oldWALs 2024-12-04T06:55:37,104 INFO [RS:0;607fd5c6574c:35753 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 607fd5c6574c%2C35753%2C1733295335935:(num 1733295336322) 2024-12-04T06:55:37,104 DEBUG [RS:0;607fd5c6574c:35753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T06:55:37,104 INFO [RS:0;607fd5c6574c:35753 {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T06:55:37,104 INFO [RS:0;607fd5c6574c:35753 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T06:55:37,104 INFO [RS:0;607fd5c6574c:35753 {}] hbase.ChoreService(370): Chore service for: regionserver/607fd5c6574c:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-04T06:55:37,105 INFO [RS:0;607fd5c6574c:35753 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T06:55:37,105 INFO [regionserver/607fd5c6574c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-04T06:55:37,105 INFO [RS:0;607fd5c6574c:35753 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35753 2024-12-04T06:55:37,107 INFO [RS:0;607fd5c6574c:35753 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T06:55:37,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46787-0x1017c41d4390000, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T06:55:37,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35753-0x1017c41d4390001, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/607fd5c6574c,35753,1733295335935 2024-12-04T06:55:37,108 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [607fd5c6574c,35753,1733295335935] 2024-12-04T06:55:37,110 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/607fd5c6574c,35753,1733295335935 already deleted, retry=false 2024-12-04T06:55:37,110 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 607fd5c6574c,35753,1733295335935 expired; onlineServers=0 2024-12-04T06:55:37,110 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '607fd5c6574c,46787,1733295335874' ***** 2024-12-04T06:55:37,110 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-04T06:55:37,110 INFO [M:0;607fd5c6574c:46787 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T06:55:37,110 INFO [M:0;607fd5c6574c:46787 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T06:55:37,110 DEBUG [M:0;607fd5c6574c:46787 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-04T06:55:37,110 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
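[editor's note] The sequence above is the normal expiry path: the region server's ephemeral znode under /hbase/rs disappears when its ZooKeeper session closes, the master's RegionServerTracker sees the deletion and processes the expiration, and since cluster shutdown is set it goes on to stop the master. The ephemeral-node behaviour itself can be reproduced with the plain ZooKeeper client, as in the sketch below; the paths mirror this log, everything else is assumed.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralRsZnodeDemo {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:57336", 30000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();
    // Region servers register as ephemeral children of /hbase/rs (the parent must already exist).
    zk.create("/hbase/rs/example-host,12345,0", new byte[0],
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    System.out.println("children: " + zk.getChildren("/hbase/rs", true));
    // Closing the session deletes the ephemeral node; watchers on /hbase/rs receive
    // NodeChildrenChanged / NodeDeleted, which is what RegionServerTracker reacts to above.
    zk.close();
  }
}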
2024-12-04T06:55:37,110 DEBUG [M:0;607fd5c6574c:46787 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-04T06:55:37,110 DEBUG [master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.large.0-1733295336102 {}] cleaner.HFileCleaner(306): Exit Thread[master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.large.0-1733295336102,5,FailOnTimeoutGroup] 2024-12-04T06:55:37,110 DEBUG [master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.small.0-1733295336102 {}] cleaner.HFileCleaner(306): Exit Thread[master/607fd5c6574c:0:becomeActiveMaster-HFileCleaner.small.0-1733295336102,5,FailOnTimeoutGroup] 2024-12-04T06:55:37,110 INFO [M:0;607fd5c6574c:46787 {}] hbase.ChoreService(370): Chore service for: master/607fd5c6574c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-04T06:55:37,111 INFO [M:0;607fd5c6574c:46787 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T06:55:37,111 DEBUG [M:0;607fd5c6574c:46787 {}] master.HMaster(1795): Stopping service threads 2024-12-04T06:55:37,111 INFO [M:0;607fd5c6574c:46787 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-04T06:55:37,111 INFO [M:0;607fd5c6574c:46787 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-04T06:55:37,111 INFO [M:0;607fd5c6574c:46787 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-04T06:55:37,111 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-04T06:55:37,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46787-0x1017c41d4390000, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-04T06:55:37,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46787-0x1017c41d4390000, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T06:55:37,112 DEBUG [M:0;607fd5c6574c:46787 {}] zookeeper.ZKUtil(347): master:46787-0x1017c41d4390000, quorum=127.0.0.1:57336, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-04T06:55:37,112 WARN [M:0;607fd5c6574c:46787 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-04T06:55:37,116 INFO [M:0;607fd5c6574c:46787 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/.lastflushedseqids 2024-12-04T06:55:37,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38767 is added to blk_1073741838_1014 (size=99) 2024-12-04T06:55:37,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42213 is added to blk_1073741838_1014 (size=99) 2024-12-04T06:55:37,126 INFO [M:0;607fd5c6574c:46787 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-04T06:55:37,126 INFO [M:0;607fd5c6574c:46787 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-04T06:55:37,126 DEBUG [M:0;607fd5c6574c:46787 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-04T06:55:37,126 INFO [M:0;607fd5c6574c:46787 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:55:37,126 DEBUG [M:0;607fd5c6574c:46787 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:55:37,126 DEBUG [M:0;607fd5c6574c:46787 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-04T06:55:37,126 DEBUG [M:0;607fd5c6574c:46787 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:55:37,126 INFO [M:0;607fd5c6574c:46787 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-04T06:55:37,142 DEBUG [M:0;607fd5c6574c:46787 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e7075499cd4148dc8b85a1ea87b7182b is 82, key is hbase:meta,,1/info:regioninfo/1733295336729/Put/seqid=0 2024-12-04T06:55:37,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38767 is added to blk_1073741839_1015 (size=5672) 2024-12-04T06:55:37,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42213 is added to blk_1073741839_1015 (size=5672) 2024-12-04T06:55:37,148 INFO [M:0;607fd5c6574c:46787 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e7075499cd4148dc8b85a1ea87b7182b 2024-12-04T06:55:37,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T06:55:37,174 DEBUG [M:0;607fd5c6574c:46787 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5a34ca88e2404cc88d03954124ac5e9c is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733295336749/Put/seqid=0 2024-12-04T06:55:37,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38767 is added to blk_1073741840_1016 (size=5275) 2024-12-04T06:55:37,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42213 is added to blk_1073741840_1016 (size=5275) 2024-12-04T06:55:37,181 INFO [M:0;607fd5c6574c:46787 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5a34ca88e2404cc88d03954124ac5e9c 2024-12-04T06:55:37,203 DEBUG [M:0;607fd5c6574c:46787 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0fd27e6adb5b474b999890a298cede8e is 69, key is 607fd5c6574c,35753,1733295335935/rs:state/1733295336177/Put/seqid=0 2024-12-04T06:55:37,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38767 is added to blk_1073741841_1017 (size=5156) 2024-12-04T06:55:37,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42213 is added to blk_1073741841_1017 (size=5156) 2024-12-04T06:55:37,208 INFO [RS:0;607fd5c6574c:35753 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-04T06:55:37,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35753-0x1017c41d4390001, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T06:55:37,208 INFO [RS:0;607fd5c6574c:35753 {}] regionserver.HRegionServer(1031): Exiting; stopping=607fd5c6574c,35753,1733295335935; zookeeper connection closed. 
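[editor's note] The RecoverLeaseFSUtils WARN a few entries above (and the identical one later, during MiniZK shutdown) comes from the Close-WAL-Writer thread probing isFileClosed reflectively after the underlying DFSClient has already been shut down, hence the "Filesystem closed" cause; it reads as teardown-ordering noise rather than data loss. Outside of that shutdown race, explicit lease recovery on an HDFS file uses the public DistributedFileSystem calls that the utility wraps, roughly as below; the NameNode address and WAL path are the ones from this log and are purely illustrative.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoveryProbe {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:41097"), new Configuration());
    // The WAL file named in the WARN above.
    Path wal = new Path("/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/"
        + "607fd5c6574c,41491,1733295133720/607fd5c6574c%2C41491%2C1733295133720.1733295133925");
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    // recoverLease returns true once the NameNode considers the file closed;
    // RecoverLeaseFSUtils polls isFileClosed between retries, per the stack trace.
    boolean recovered = dfs.recoverLease(wal);
    System.out.println("recovered=" + recovered + ", closed=" + dfs.isFileClosed(wal));
  }
}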
2024-12-04T06:55:37,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35753-0x1017c41d4390001, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T06:55:37,209 INFO [M:0;607fd5c6574c:46787 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0fd27e6adb5b474b999890a298cede8e 2024-12-04T06:55:37,209 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@28931b76 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@28931b76 2024-12-04T06:55:37,209 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-04T06:55:37,227 DEBUG [M:0;607fd5c6574c:46787 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/64aabb37626c432a9a4e39fa3a99171c is 52, key is load_balancer_on/state:d/1733295336866/Put/seqid=0 2024-12-04T06:55:37,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38767 is added to blk_1073741842_1018 (size=5056) 2024-12-04T06:55:37,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42213 is added to blk_1073741842_1018 (size=5056) 2024-12-04T06:55:37,232 INFO [M:0;607fd5c6574c:46787 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/64aabb37626c432a9a4e39fa3a99171c 2024-12-04T06:55:37,237 DEBUG [M:0;607fd5c6574c:46787 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e7075499cd4148dc8b85a1ea87b7182b as hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e7075499cd4148dc8b85a1ea87b7182b 2024-12-04T06:55:37,241 INFO [M:0;607fd5c6574c:46787 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e7075499cd4148dc8b85a1ea87b7182b, entries=8, sequenceid=29, filesize=5.5 K 2024-12-04T06:55:37,242 DEBUG [M:0;607fd5c6574c:46787 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5a34ca88e2404cc88d03954124ac5e9c as hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5a34ca88e2404cc88d03954124ac5e9c 2024-12-04T06:55:37,246 INFO [M:0;607fd5c6574c:46787 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5a34ca88e2404cc88d03954124ac5e9c, entries=3, sequenceid=29, filesize=5.2 K 2024-12-04T06:55:37,247 DEBUG [M:0;607fd5c6574c:46787 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0fd27e6adb5b474b999890a298cede8e as hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0fd27e6adb5b474b999890a298cede8e 2024-12-04T06:55:37,251 INFO [M:0;607fd5c6574c:46787 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0fd27e6adb5b474b999890a298cede8e, entries=1, sequenceid=29, filesize=5.0 K 2024-12-04T06:55:37,252 DEBUG [M:0;607fd5c6574c:46787 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/64aabb37626c432a9a4e39fa3a99171c as hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/64aabb37626c432a9a4e39fa3a99171c 2024-12-04T06:55:37,257 INFO [M:0;607fd5c6574c:46787 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39191/user/jenkins/test-data/473e4993-b05c-2e31-e602-a8019e70f5cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/64aabb37626c432a9a4e39fa3a99171c, entries=1, sequenceid=29, filesize=4.9 K 2024-12-04T06:55:37,258 INFO [M:0;607fd5c6574c:46787 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 132ms, sequenceid=29, compaction requested=false 2024-12-04T06:55:37,260 INFO [M:0;607fd5c6574c:46787 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T06:55:37,260 DEBUG [M:0;607fd5c6574c:46787 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733295337126Disabling compacts and flushes for region at 1733295337126Disabling writes for close at 1733295337126Obtaining lock to block concurrent updates at 1733295337126Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733295337126Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733295337126Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733295337127 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733295337127Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733295337142 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733295337142Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733295337153 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733295337174 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733295337174Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733295337185 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733295337203 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733295337203Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733295337213 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733295337227 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733295337227Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1a6be0: reopening flushed file at 1733295337236 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2b7a317d: reopening flushed file at 1733295337241 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3aa5bf64: reopening flushed file at 1733295337246 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@78d56255: reopening flushed file at 1733295337252 (+6 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 132ms, sequenceid=29, compaction requested=false at 1733295337258 (+6 ms)Writing region close event to WAL at 1733295337259 (+1 ms)Closed at 1733295337259 2024-12-04T06:55:37,260 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:37,260 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:37,260 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:37,260 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:37,261 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T06:55:37,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38767 is added to blk_1073741830_1006 (size=10311) 2024-12-04T06:55:37,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42213 is added to blk_1073741830_1006 (size=10311) 2024-12-04T06:55:37,263 INFO [M:0;607fd5c6574c:46787 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-04T06:55:37,263 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-04T06:55:37,263 INFO [M:0;607fd5c6574c:46787 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46787 2024-12-04T06:55:37,263 INFO [M:0;607fd5c6574c:46787 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T06:55:37,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46787-0x1017c41d4390000, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T06:55:37,366 INFO [M:0;607fd5c6574c:46787 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-04T06:55:37,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46787-0x1017c41d4390000, quorum=127.0.0.1:57336, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T06:55:37,368 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@372f7d77{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:55:37,368 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3ffef2a8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T06:55:37,369 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T06:55:37,369 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@814e400{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T06:55:37,369 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@238bf9b3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/hadoop.log.dir/,STOPPED} 2024-12-04T06:55:37,370 WARN [BP-116886128-172.17.0.2-1733295335132 heartbeating to localhost/127.0.0.1:39191 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T06:55:37,370 WARN [BP-116886128-172.17.0.2-1733295335132 heartbeating to localhost/127.0.0.1:39191 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-116886128-172.17.0.2-1733295335132 (Datanode Uuid 89e9b2f6-273b-4e08-8638-f2879e8ec936) service to localhost/127.0.0.1:39191 2024-12-04T06:55:37,370 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-04T06:55:37,370 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T06:55:37,371 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/cluster_621cef8b-624b-8449-63fc-c819cdddd2fd/data/data3/current/BP-116886128-172.17.0.2-1733295335132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:55:37,371 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/cluster_621cef8b-624b-8449-63fc-c819cdddd2fd/data/data4/current/BP-116886128-172.17.0.2-1733295335132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T06:55:37,371 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T06:55:37,373 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@483384c3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T06:55:37,373 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3e70e237{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T06:55:37,373 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T06:55:37,373 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6c8a9439{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T06:55:37,374 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24bb5ef7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/hadoop.log.dir/,STOPPED} 2024-12-04T06:55:37,375 WARN [BP-116886128-172.17.0.2-1733295335132 heartbeating to localhost/127.0.0.1:39191 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T06:55:37,375 WARN [BP-116886128-172.17.0.2-1733295335132 heartbeating to localhost/127.0.0.1:39191 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-116886128-172.17.0.2-1733295335132 (Datanode Uuid a9d2a0c7-971a-4cd0-9d67-07f7cdb49567) service to localhost/127.0.0.1:39191 2024-12-04T06:55:37,375 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-04T06:55:37,375 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-04T06:55:37,375 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/cluster_621cef8b-624b-8449-63fc-c819cdddd2fd/data/data1/current/BP-116886128-172.17.0.2-1733295335132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-04T06:55:37,375 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/cluster_621cef8b-624b-8449-63fc-c819cdddd2fd/data/data2/current/BP-116886128-172.17.0.2-1733295335132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-04T06:55:37,375 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-04T06:55:37,383 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5d175de5{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-04T06:55:37,383 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@51af64d5{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-04T06:55:37,383 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-04T06:55:37,383 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@32d01bcd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-04T06:55:37,383 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d7a0e2e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d45e1791-57c1-cea7-4ee7-b72bb4ac57f9/hadoop.log.dir/,STOPPED}
2024-12-04T06:55:37,390 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-04T06:55:37,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41097/user/jenkins/test-data/b41e0716-d3b6-b39a-3b1c-172e1e09fe12/WALs/607fd5c6574c,33613,1733295132530/607fd5c6574c%2C33613%2C1733295132530.meta.1733295133523.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-04T06:55:37,408 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-04T06:55:37,419 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=269 (was 228)
Potentially hanging thread: nioEventLoopGroup-45-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39191
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39191 from jenkins.hfs.7
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-43-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39191
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39191 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-44-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:39191
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39191 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39191
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:39191
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: globalEventExecutor-1-20
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113)
    app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
 - Thread LEAK? -, OpenFileDescriptor=536 (was 509) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=251 (was 251), ProcessCount=11 (was 11), AvailableMemoryMB=5531 (was 5539)
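
The hbase.ResourceChecker(175) summary above is a before/after comparison: Thread=269 (was 228) and OpenFileDescriptor=536 (was 509) are counts captured around the test, and each "Potentially hanging thread" entry is a thread still alive at the "after" snapshot, printed with its stack. As a hedged illustration of that general technique only (this is not the actual org.apache.hadoop.hbase.ResourceChecker source; the ThreadLeakSketch class and its helper names below are invented for the example, using only standard JDK APIs), a before/after thread diff can be taken like this:

import java.util.HashSet;
import java.util.Map;
import java.util.Set;

/**
 * Illustrative sketch only: diff the live-thread set before and after a test body,
 * similar in spirit to the "Potentially hanging thread" report above.
 */
public class ThreadLeakSketch {

  /** Snapshot the names of all live threads in this JVM. */
  static Set<String> liveThreadNames() {
    Set<String> names = new HashSet<>();
    for (Thread t : Thread.getAllStackTraces().keySet()) {
      names.add(t.getName());
    }
    return names;
  }

  public static void main(String[] args) {
    Set<String> before = liveThreadNames();   // corresponds to "(was 228)" in the log
    runTestBody();                            // placeholder for the test under check
    Set<String> after = liveThreadNames();    // corresponds to "Thread=269" in the log

    // Report threads that exist after the test but not before, with their stacks,
    // roughly what the log prints as "Potentially hanging thread: <name>".
    Map<Thread, StackTraceElement[]> stacks = Thread.getAllStackTraces();
    for (Map.Entry<Thread, StackTraceElement[]> e : stacks.entrySet()) {
      if (!before.contains(e.getKey().getName())) {
        System.out.println("Potentially hanging thread: " + e.getKey().getName());
        for (StackTraceElement frame : e.getValue()) {
          System.out.println("    " + frame);
        }
      }
    }
    System.out.println("Thread=" + after.size() + " (was " + before.size() + ")");
  }

  /** Stand-in for the test body; deliberately starts a thread that outlives the test. */
  static void runTestBody() {
    Thread t = new Thread(() -> {
      try { Thread.sleep(60_000); } catch (InterruptedException ignored) { }
    }, "example-leaked-thread");
    t.setDaemon(true);
    t.start();
  }
}

Under this reading, the nioEventLoopGroup-* entries above, whose stacks sit in SingleThreadEventExecutor.confirmShutdown, appear to be Netty event loops still draining their shutdown quiet period when the snapshot was taken, which would explain why the summary flags them only as a possible leak ("Thread LEAK? -") rather than a failure.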