2024-11-17 15:27:56,752 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-17 15:27:56,763 main DEBUG Took 0.009288 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-17 15:27:56,764 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-17 15:27:56,764 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-17 15:27:56,765 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-17 15:27:56,766 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 15:27:56,772 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-17 15:27:56,783 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 15:27:56,785 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 15:27:56,786 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 15:27:56,786 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 15:27:56,786 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 15:27:56,787 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 15:27:56,787 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 15:27:56,788 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 15:27:56,788 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 15:27:56,788 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 15:27:56,789 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 15:27:56,789 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 15:27:56,790 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 15:27:56,790 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-17 15:27:56,790 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 15:27:56,791 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 15:27:56,791 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 15:27:56,791 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 15:27:56,792 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 15:27:56,792 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 15:27:56,792 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 15:27:56,792 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 15:27:56,793 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 15:27:56,793 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 15:27:56,793 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 15:27:56,794 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-17 15:27:56,795 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 15:27:56,796 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-17 15:27:56,798 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-17 15:27:56,798 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-17 15:27:56,799 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-17 15:27:56,799 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-17 15:27:56,808 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-17 15:27:56,811 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-17 15:27:56,812 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-17 15:27:56,812 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-17 15:27:56,813 main DEBUG createAppenders(={Console}) 2024-11-17 15:27:56,814 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-17 15:27:56,814 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-17 15:27:56,814 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-17 15:27:56,815 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-17 15:27:56,815 main DEBUG OutputStream closed 2024-11-17 15:27:56,815 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-17 15:27:56,815 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-17 15:27:56,816 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-17 15:27:56,880 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-17 15:27:56,882 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-17 15:27:56,883 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-17 15:27:56,884 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-17 15:27:56,884 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-17 15:27:56,885 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-17 15:27:56,885 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-17 15:27:56,885 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-17 15:27:56,885 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-17 15:27:56,886 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-17 15:27:56,886 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-17 15:27:56,886 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-17 15:27:56,887 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-17 15:27:56,887 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-17 15:27:56,887 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-17 15:27:56,887 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-17 15:27:56,888 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-17 15:27:56,888 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-17 15:27:56,891 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-17 15:27:56,891 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-17 15:27:56,891 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-17 15:27:56,892 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-17T15:27:57,114 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4 2024-11-17 15:27:57,117 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-17 15:27:57,118 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-17T15:27:57,127 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-17T15:27:57,159 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=258, ProcessCount=11, AvailableMemoryMB=4053 2024-11-17T15:27:57,162 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-17T15:27:57,177 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/cluster_e684ad8a-3b6f-00db-fd58-5febbe409a88, deleteOnExit=true 2024-11-17T15:27:57,178 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-17T15:27:57,179 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/test.cache.data in system properties and HBase conf 2024-11-17T15:27:57,180 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/hadoop.tmp.dir in system properties and HBase conf 2024-11-17T15:27:57,181 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/hadoop.log.dir in system properties and HBase conf 2024-11-17T15:27:57,181 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-17T15:27:57,182 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-17T15:27:57,182 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-17T15:27:57,288 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-17T15:27:57,376 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-17T15:27:57,380 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-17T15:27:57,380 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-17T15:27:57,381 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-17T15:27:57,381 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T15:27:57,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-17T15:27:57,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-17T15:27:57,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T15:27:57,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T15:27:57,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-17T15:27:57,384 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/nfs.dump.dir in system properties and HBase conf 2024-11-17T15:27:57,384 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/java.io.tmpdir in system properties and HBase conf 2024-11-17T15:27:57,384 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T15:27:57,385 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-17T15:27:57,385 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-17T15:27:57,862 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T15:27:58,211 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-17T15:27:58,299 INFO [Time-limited test {}] log.Log(170): Logging initialized @2219ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-17T15:27:58,379 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:27:58,450 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T15:27:58,470 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T15:27:58,470 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T15:27:58,471 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T15:27:58,484 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:27:58,486 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f2ab976{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/hadoop.log.dir/,AVAILABLE} 2024-11-17T15:27:58,487 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46a86f8c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T15:27:58,690 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4c77270f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/java.io.tmpdir/jetty-localhost-44171-hadoop-hdfs-3_4_1-tests_jar-_-any-6965113845132574365/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T15:27:58,697 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c5145e6{HTTP/1.1, (http/1.1)}{localhost:44171} 2024-11-17T15:27:58,697 INFO [Time-limited test {}] server.Server(415): Started @2619ms 2024-11-17T15:27:58,721 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T15:27:59,076 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:27:59,082 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T15:27:59,083 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T15:27:59,084 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T15:27:59,084 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T15:27:59,085 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2aa5bb6e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/hadoop.log.dir/,AVAILABLE} 2024-11-17T15:27:59,085 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5bdc1e47{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T15:27:59,205 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@59e63bea{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/java.io.tmpdir/jetty-localhost-38097-hadoop-hdfs-3_4_1-tests_jar-_-any-9694421118126071697/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:27:59,206 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@264a9341{HTTP/1.1, (http/1.1)}{localhost:38097} 2024-11-17T15:27:59,207 INFO [Time-limited test {}] server.Server(415): Started @3128ms 2024-11-17T15:27:59,263 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T15:27:59,385 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:27:59,390 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T15:27:59,391 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T15:27:59,391 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T15:27:59,392 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T15:27:59,393 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ae73635{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/hadoop.log.dir/,AVAILABLE} 2024-11-17T15:27:59,394 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@335a4f9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T15:27:59,529 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@55d18735{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/java.io.tmpdir/jetty-localhost-37471-hadoop-hdfs-3_4_1-tests_jar-_-any-14105403323131327396/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:27:59,530 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@15370523{HTTP/1.1, (http/1.1)}{localhost:37471} 2024-11-17T15:27:59,530 INFO [Time-limited test {}] server.Server(415): Started @3452ms 2024-11-17T15:27:59,533 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-17T15:27:59,765 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/cluster_e684ad8a-3b6f-00db-fd58-5febbe409a88/data/data1/current/BP-245048582-172.17.0.2-1731857277960/current, will proceed with Du for space computation calculation, 2024-11-17T15:27:59,765 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/cluster_e684ad8a-3b6f-00db-fd58-5febbe409a88/data/data3/current/BP-245048582-172.17.0.2-1731857277960/current, will proceed with Du for space computation calculation, 2024-11-17T15:27:59,768 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/cluster_e684ad8a-3b6f-00db-fd58-5febbe409a88/data/data4/current/BP-245048582-172.17.0.2-1731857277960/current, will proceed with Du for space computation calculation, 2024-11-17T15:27:59,768 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/cluster_e684ad8a-3b6f-00db-fd58-5febbe409a88/data/data2/current/BP-245048582-172.17.0.2-1731857277960/current, will proceed with Du for space computation calculation, 2024-11-17T15:27:59,831 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-17T15:27:59,837 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T15:27:59,923 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x117205da429881dd with lease ID 0x57d666eba39dc838: Processing first storage report for DS-ab799247-4f1a-481a-89c6-78875ed51ffa from datanode DatanodeRegistration(127.0.0.1:43077, datanodeUuid=1b3c695c-4d41-404b-9454-8fdc542079f1, infoPort=38403, infoSecurePort=0, ipcPort=36447, storageInfo=lv=-57;cid=testClusterID;nsid=850251788;c=1731857277960) 2024-11-17T15:27:59,924 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x117205da429881dd with lease ID 0x57d666eba39dc838: from storage DS-ab799247-4f1a-481a-89c6-78875ed51ffa node DatanodeRegistration(127.0.0.1:43077, datanodeUuid=1b3c695c-4d41-404b-9454-8fdc542079f1, infoPort=38403, infoSecurePort=0, ipcPort=36447, storageInfo=lv=-57;cid=testClusterID;nsid=850251788;c=1731857277960), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-17T15:27:59,925 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6a95b4b4ab765c45 with lease ID 0x57d666eba39dc837: Processing first storage report for DS-e4cd15d8-3f91-463e-9ba6-1b918a841126 from datanode DatanodeRegistration(127.0.0.1:39849, datanodeUuid=38e3011b-f23a-4452-a6cc-b599c74faa3c, infoPort=40283, infoSecurePort=0, ipcPort=42293, storageInfo=lv=-57;cid=testClusterID;nsid=850251788;c=1731857277960) 2024-11-17T15:27:59,925 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6a95b4b4ab765c45 with lease ID 0x57d666eba39dc837: from storage DS-e4cd15d8-3f91-463e-9ba6-1b918a841126 node DatanodeRegistration(127.0.0.1:39849, datanodeUuid=38e3011b-f23a-4452-a6cc-b599c74faa3c, infoPort=40283, infoSecurePort=0, ipcPort=42293, storageInfo=lv=-57;cid=testClusterID;nsid=850251788;c=1731857277960), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-17T15:27:59,925 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x117205da429881dd with lease ID 0x57d666eba39dc838: Processing first storage report for DS-3711cf48-5036-490a-bcf2-a5f0e2f134f6 from datanode DatanodeRegistration(127.0.0.1:43077, datanodeUuid=1b3c695c-4d41-404b-9454-8fdc542079f1, infoPort=38403, infoSecurePort=0, ipcPort=36447, storageInfo=lv=-57;cid=testClusterID;nsid=850251788;c=1731857277960) 2024-11-17T15:27:59,925 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x117205da429881dd with lease ID 0x57d666eba39dc838: from storage DS-3711cf48-5036-490a-bcf2-a5f0e2f134f6 node DatanodeRegistration(127.0.0.1:43077, datanodeUuid=1b3c695c-4d41-404b-9454-8fdc542079f1, infoPort=38403, infoSecurePort=0, ipcPort=36447, storageInfo=lv=-57;cid=testClusterID;nsid=850251788;c=1731857277960), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:27:59,925 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6a95b4b4ab765c45 with lease ID 0x57d666eba39dc837: Processing first storage report for DS-6548798d-044b-453c-b44b-6a3b4238b08b from datanode DatanodeRegistration(127.0.0.1:39849, datanodeUuid=38e3011b-f23a-4452-a6cc-b599c74faa3c, infoPort=40283, infoSecurePort=0, ipcPort=42293, storageInfo=lv=-57;cid=testClusterID;nsid=850251788;c=1731857277960) 2024-11-17T15:27:59,926 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x6a95b4b4ab765c45 with lease ID 0x57d666eba39dc837: from storage DS-6548798d-044b-453c-b44b-6a3b4238b08b node DatanodeRegistration(127.0.0.1:39849, datanodeUuid=38e3011b-f23a-4452-a6cc-b599c74faa3c, infoPort=40283, infoSecurePort=0, ipcPort=42293, storageInfo=lv=-57;cid=testClusterID;nsid=850251788;c=1731857277960), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-17T15:27:59,962 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4 2024-11-17T15:28:00,052 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/cluster_e684ad8a-3b6f-00db-fd58-5febbe409a88/zookeeper_0, clientPort=56154, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/cluster_e684ad8a-3b6f-00db-fd58-5febbe409a88/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/cluster_e684ad8a-3b6f-00db-fd58-5febbe409a88/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-17T15:28:00,063 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56154 2024-11-17T15:28:00,077 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:28:00,080 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:28:00,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741825_1001 (size=7) 2024-11-17T15:28:00,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741825_1001 (size=7) 2024-11-17T15:28:00,754 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d with version=8 2024-11-17T15:28:00,755 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/hbase-staging 2024-11-17T15:28:00,851 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-17T15:28:01,130 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7a780d55532c:0 server-side Connection retries=45 2024-11-17T15:28:01,142 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T15:28:01,142 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T15:28:01,147 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T15:28:01,147 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T15:28:01,147 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T15:28:01,310 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-17T15:28:01,383 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-17T15:28:01,394 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-17T15:28:01,399 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T15:28:01,428 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 20080 (auto-detected) 2024-11-17T15:28:01,429 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-17T15:28:01,449 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34909 2024-11-17T15:28:01,477 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34909 connecting to ZooKeeper ensemble=127.0.0.1:56154 2024-11-17T15:28:01,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:349090x0, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T15:28:01,515 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34909-0x101268ad6a90000 connected 2024-11-17T15:28:01,548 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:28:01,550 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:28:01,561 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34909-0x101268ad6a90000, quorum=127.0.0.1:56154, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T15:28:01,565 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d, hbase.cluster.distributed=false 2024-11-17T15:28:01,593 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34909-0x101268ad6a90000, quorum=127.0.0.1:56154, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T15:28:01,598 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34909 2024-11-17T15:28:01,599 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34909 2024-11-17T15:28:01,602 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34909 2024-11-17T15:28:01,603 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34909 2024-11-17T15:28:01,604 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34909 2024-11-17T15:28:01,752 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7a780d55532c:0 server-side Connection retries=45 2024-11-17T15:28:01,754 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T15:28:01,755 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T15:28:01,755 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T15:28:01,755 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T15:28:01,755 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T15:28:01,758 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-17T15:28:01,761 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T15:28:01,762 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33973 2024-11-17T15:28:01,764 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33973 connecting to ZooKeeper ensemble=127.0.0.1:56154 2024-11-17T15:28:01,766 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:28:01,772 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:28:01,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:339730x0, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T15:28:01,784 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:339730x0, quorum=127.0.0.1:56154, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T15:28:01,784 DEBUG 
[zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33973-0x101268ad6a90001 connected 2024-11-17T15:28:01,790 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-17T15:28:01,801 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-17T15:28:01,804 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33973-0x101268ad6a90001, quorum=127.0.0.1:56154, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-17T15:28:01,811 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33973-0x101268ad6a90001, quorum=127.0.0.1:56154, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T15:28:01,813 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33973 2024-11-17T15:28:01,814 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33973 2024-11-17T15:28:01,814 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33973 2024-11-17T15:28:01,816 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33973 2024-11-17T15:28:01,817 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33973 2024-11-17T15:28:01,839 DEBUG [M:0;7a780d55532c:34909 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7a780d55532c:34909 2024-11-17T15:28:01,840 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7a780d55532c,34909,1731857280918 2024-11-17T15:28:01,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33973-0x101268ad6a90001, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T15:28:01,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34909-0x101268ad6a90000, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T15:28:01,850 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34909-0x101268ad6a90000, quorum=127.0.0.1:56154, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7a780d55532c,34909,1731857280918 2024-11-17T15:28:01,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33973-0x101268ad6a90001, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-17T15:28:01,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34909-0x101268ad6a90000, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:28:01,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33973-0x101268ad6a90001, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-17T15:28:01,873 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34909-0x101268ad6a90000, quorum=127.0.0.1:56154, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-17T15:28:01,875 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7a780d55532c,34909,1731857280918 from backup master directory 2024-11-17T15:28:01,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33973-0x101268ad6a90001, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T15:28:01,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34909-0x101268ad6a90000, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7a780d55532c,34909,1731857280918 2024-11-17T15:28:01,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34909-0x101268ad6a90000, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T15:28:01,879 WARN [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-17T15:28:01,879 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7a780d55532c,34909,1731857280918 2024-11-17T15:28:01,882 INFO [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-17T15:28:01,884 INFO [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-17T15:28:01,942 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/hbase.id] with ID: 6526ebff-589d-4563-8ba1-74a48d66d2de 2024-11-17T15:28:01,942 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/.tmp/hbase.id 2024-11-17T15:28:01,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741826_1002 (size=42) 2024-11-17T15:28:01,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741826_1002 (size=42) 2024-11-17T15:28:01,955 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/.tmp/hbase.id]:[hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/hbase.id] 2024-11-17T15:28:02,001 INFO [master/7a780d55532c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:28:02,006 INFO [master/7a780d55532c:0:becomeActiveMaster {}] 
util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-17T15:28:02,027 INFO [master/7a780d55532c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 18ms. 2024-11-17T15:28:02,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33973-0x101268ad6a90001, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:28:02,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34909-0x101268ad6a90000, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:28:02,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741827_1003 (size=196) 2024-11-17T15:28:02,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741827_1003 (size=196) 2024-11-17T15:28:02,066 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T15:28:02,068 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-17T15:28:02,074 INFO [master/7a780d55532c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T15:28:02,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741828_1004 (size=1189) 2024-11-17T15:28:02,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741828_1004 (size=1189) 2024-11-17T15:28:02,133 INFO [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY 
=> ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/MasterData/data/master/store 2024-11-17T15:28:02,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741829_1005 (size=34) 2024-11-17T15:28:02,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741829_1005 (size=34) 2024-11-17T15:28:02,160 INFO [master/7a780d55532c:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-17T15:28:02,163 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:28:02,164 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T15:28:02,164 INFO [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:28:02,165 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:28:02,166 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T15:28:02,166 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:28:02,167 INFO [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-17T15:28:02,168 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731857282164Disabling compacts and flushes for region at 1731857282164Disabling writes for close at 1731857282166 (+2 ms)Writing region close event to WAL at 1731857282167 (+1 ms)Closed at 1731857282167 2024-11-17T15:28:02,170 WARN [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/MasterData/data/master/store/.initializing 2024-11-17T15:28:02,171 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/MasterData/WALs/7a780d55532c,34909,1731857280918 2024-11-17T15:28:02,194 INFO [master/7a780d55532c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a780d55532c%2C34909%2C1731857280918, suffix=, logDir=hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/MasterData/WALs/7a780d55532c,34909,1731857280918, archiveDir=hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/MasterData/oldWALs, maxLogs=10 2024-11-17T15:28:02,206 INFO [master/7a780d55532c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C34909%2C1731857280918.1731857282200 2024-11-17T15:28:02,229 INFO [master/7a780d55532c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/MasterData/WALs/7a780d55532c,34909,1731857280918/7a780d55532c%2C34909%2C1731857280918.1731857282200 2024-11-17T15:28:02,239 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38403:38403),(127.0.0.1/127.0.0.1:40283:40283)] 2024-11-17T15:28:02,240 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-17T15:28:02,241 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:28:02,244 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:28:02,245 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:28:02,285 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:28:02,316 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-17T15:28:02,320 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:28:02,323 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:28:02,324 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:28:02,327 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-17T15:28:02,327 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:28:02,328 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T15:28:02,329 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:28:02,331 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: 
max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-17T15:28:02,332 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:28:02,333 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T15:28:02,333 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:28:02,335 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-17T15:28:02,336 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:28:02,336 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T15:28:02,337 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:28:02,340 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:28:02,342 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:28:02,347 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:28:02,347 DEBUG 
[master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:28:02,351 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-17T15:28:02,355 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:28:02,360 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T15:28:02,361 INFO [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=846511, jitterRate=0.0763954222202301}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-17T15:28:02,369 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731857282258Initializing all the Stores at 1731857282261 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857282261Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857282262 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857282262Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857282262Cleaning up temporary data from old regions at 1731857282347 (+85 ms)Region opened successfully at 1731857282369 (+22 ms) 2024-11-17T15:28:02,370 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-17T15:28:02,408 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41f1a111, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7a780d55532c/172.17.0.2:0 2024-11-17T15:28:02,440 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-17T15:28:02,451 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-17T15:28:02,451 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-17T15:28:02,454 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-17T15:28:02,456 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-17T15:28:02,460 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-17T15:28:02,461 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-17T15:28:02,491 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-17T15:28:02,501 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34909-0x101268ad6a90000, quorum=127.0.0.1:56154, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-17T15:28:02,504 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-17T15:28:02,506 INFO [master/7a780d55532c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-17T15:28:02,508 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34909-0x101268ad6a90000, quorum=127.0.0.1:56154, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-17T15:28:02,509 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-17T15:28:02,512 INFO [master/7a780d55532c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-17T15:28:02,515 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34909-0x101268ad6a90000, quorum=127.0.0.1:56154, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-17T15:28:02,517 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-17T15:28:02,518 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34909-0x101268ad6a90000, quorum=127.0.0.1:56154, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily 
an error) 2024-11-17T15:28:02,521 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-17T15:28:02,538 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34909-0x101268ad6a90000, quorum=127.0.0.1:56154, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-17T15:28:02,540 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-17T15:28:02,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34909-0x101268ad6a90000, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T15:28:02,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33973-0x101268ad6a90001, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T15:28:02,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34909-0x101268ad6a90000, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:28:02,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33973-0x101268ad6a90001, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:28:02,547 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7a780d55532c,34909,1731857280918, sessionid=0x101268ad6a90000, setting cluster-up flag (Was=false) 2024-11-17T15:28:02,560 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34909-0x101268ad6a90000, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:28:02,560 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33973-0x101268ad6a90001, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:28:02,566 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-17T15:28:02,568 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7a780d55532c,34909,1731857280918 2024-11-17T15:28:02,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34909-0x101268ad6a90000, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:28:02,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33973-0x101268ad6a90001, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:28:02,582 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-17T15:28:02,584 DEBUG 
[master/7a780d55532c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7a780d55532c,34909,1731857280918 2024-11-17T15:28:02,591 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-17T15:28:02,621 INFO [RS:0;7a780d55532c:33973 {}] regionserver.HRegionServer(746): ClusterId : 6526ebff-589d-4563-8ba1-74a48d66d2de 2024-11-17T15:28:02,625 DEBUG [RS:0;7a780d55532c:33973 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-17T15:28:02,630 DEBUG [RS:0;7a780d55532c:33973 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-17T15:28:02,631 DEBUG [RS:0;7a780d55532c:33973 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-17T15:28:02,634 DEBUG [RS:0;7a780d55532c:33973 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-17T15:28:02,635 DEBUG [RS:0;7a780d55532c:33973 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3efe7a97, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7a780d55532c/172.17.0.2:0 2024-11-17T15:28:02,660 DEBUG [RS:0;7a780d55532c:33973 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7a780d55532c:33973 2024-11-17T15:28:02,664 INFO [RS:0;7a780d55532c:33973 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-17T15:28:02,664 INFO [RS:0;7a780d55532c:33973 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-17T15:28:02,664 DEBUG [RS:0;7a780d55532c:33973 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-17T15:28:02,667 INFO [RS:0;7a780d55532c:33973 {}] regionserver.HRegionServer(2659): reportForDuty to master=7a780d55532c,34909,1731857280918 with port=33973, startcode=1731857281708 2024-11-17T15:28:02,673 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-17T15:28:02,680 DEBUG [RS:0;7a780d55532c:33973 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-17T15:28:02,683 INFO [master/7a780d55532c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-17T15:28:02,689 INFO [master/7a780d55532c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
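The StochasticLoadBalancer line above reports the knobs it loaded (maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000). A rough sketch of overriding a few of them follows; the hbase.master.balancer.stochastic.* key names are assumptions inferred from those field names and should be checked against the balancer constants for this HBase version.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Key names below are assumptions derived from the field names printed
        // in the log; verify them before relying on them.
        conf.setLong("hbase.master.balancer.stochastic.maxSteps", 1000000L);
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30000L);
        System.out.println(conf.get("hbase.master.balancer.stochastic.maxSteps"));
      }
    }
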
2024-11-17T15:28:02,695 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7a780d55532c,34909,1731857280918 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-17T15:28:02,707 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7a780d55532c:0, corePoolSize=5, maxPoolSize=5 2024-11-17T15:28:02,707 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7a780d55532c:0, corePoolSize=5, maxPoolSize=5 2024-11-17T15:28:02,707 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7a780d55532c:0, corePoolSize=5, maxPoolSize=5 2024-11-17T15:28:02,708 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7a780d55532c:0, corePoolSize=5, maxPoolSize=5 2024-11-17T15:28:02,708 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7a780d55532c:0, corePoolSize=10, maxPoolSize=10 2024-11-17T15:28:02,708 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:28:02,708 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7a780d55532c:0, corePoolSize=2, maxPoolSize=2 2024-11-17T15:28:02,708 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:28:02,715 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T15:28:02,716 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-17T15:28:02,722 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:28:02,723 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-17T15:28:02,733 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731857312733 2024-11-17T15:28:02,735 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-17T15:28:02,737 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-17T15:28:02,741 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-17T15:28:02,742 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-17T15:28:02,742 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-17T15:28:02,742 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-17T15:28:02,744 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-17T15:28:02,753 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-17T15:28:02,754 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-17T15:28:02,755 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-17T15:28:02,759 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-17T15:28:02,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741831_1007 (size=1321) 2024-11-17T15:28:02,759 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-17T15:28:02,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741831_1007 (size=1321) 2024-11-17T15:28:02,763 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-17T15:28:02,763 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d 2024-11-17T15:28:02,764 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.large.0-1731857282762,5,FailOnTimeoutGroup] 2024-11-17T15:28:02,765 DEBUG 
[master/7a780d55532c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.small.0-1731857282765,5,FailOnTimeoutGroup] 2024-11-17T15:28:02,765 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-17T15:28:02,765 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-17T15:28:02,767 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-17T15:28:02,767 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-17T15:28:02,783 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56177, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-17T15:28:02,796 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34909 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7a780d55532c,33973,1731857281708 2024-11-17T15:28:02,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741832_1008 (size=32) 2024-11-17T15:28:02,800 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34909 {}] master.ServerManager(517): Registering regionserver=7a780d55532c,33973,1731857281708 2024-11-17T15:28:02,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741832_1008 (size=32) 2024-11-17T15:28:02,802 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:28:02,804 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T15:28:02,808 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T15:28:02,808 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:28:02,809 
INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:28:02,809 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T15:28:02,812 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T15:28:02,812 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:28:02,813 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:28:02,814 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T15:28:02,816 DEBUG [RS:0;7a780d55532c:33973 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d 2024-11-17T15:28:02,816 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T15:28:02,817 DEBUG [RS:0;7a780d55532c:33973 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39521 2024-11-17T15:28:02,817 DEBUG [RS:0;7a780d55532c:33973 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-17T15:28:02,817 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:28:02,818 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:28:02,818 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T15:28:02,821 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T15:28:02,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34909-0x101268ad6a90000, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T15:28:02,821 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:28:02,822 DEBUG [RS:0;7a780d55532c:33973 {}] zookeeper.ZKUtil(111): regionserver:33973-0x101268ad6a90001, quorum=127.0.0.1:56154, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7a780d55532c,33973,1731857281708 2024-11-17T15:28:02,822 WARN [RS:0;7a780d55532c:33973 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
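Two of the messages above are effectively configuration hints: the master notes that reopening regions with a very high storeFileRefCount stays disabled until hbase.regions.recovery.store.file.ref.count is set above 0, and the region server warns that the HBASE_ZNODE_FILE environment variable (read by the start scripts) is unset. A hedged sketch of the first knob only; the threshold of 256 is an arbitrary example value.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileRefCountSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // The master log above says a threshold > 0 enables reopening of regions
        // with very high store file reference counts; 256 is illustrative only.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 256);
        System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
      }
    }
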
2024-11-17T15:28:02,822 INFO [RS:0;7a780d55532c:33973 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T15:28:02,822 DEBUG [RS:0;7a780d55532c:33973 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708 2024-11-17T15:28:02,822 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:28:02,822 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T15:28:02,824 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/hbase/meta/1588230740 2024-11-17T15:28:02,824 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7a780d55532c,33973,1731857281708] 2024-11-17T15:28:02,825 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/hbase/meta/1588230740 2024-11-17T15:28:02,828 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T15:28:02,828 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T15:28:02,829 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
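The FlushLargeStoresPolicy lines above (for master:store and hbase:meta) fall back to the region memstore flush size divided by the number of families because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor. A minimal sketch of setting that key on a hypothetical table named demo with one family cf; the 16 MB bound is illustrative only.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class PerFamilyFlushBoundSketch {
      public static void main(String[] args) {
        // Hypothetical table "demo" with a single family "cf"; values are examples.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            // Key name is taken verbatim from the log line above; the value is the
            // per-family lower bound, in bytes, below which a family is not flushed
            // on its own (16 MB here, purely illustrative).
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16L * 1024 * 1024))
            .build();
        System.out.println(td.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
      }
    }
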
2024-11-17T15:28:02,832 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T15:28:02,835 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T15:28:02,836 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=787649, jitterRate=0.0015475153923034668}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T15:28:02,839 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731857282802Initializing all the Stores at 1731857282804 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857282804Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857282804Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857282804Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857282804Cleaning up temporary data from old regions at 1731857282828 (+24 ms)Region opened successfully at 1731857282839 (+11 ms) 2024-11-17T15:28:02,839 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T15:28:02,839 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T15:28:02,839 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T15:28:02,839 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T15:28:02,840 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T15:28:02,841 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T15:28:02,841 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731857282839Disabling compacts and flushes for region at 1731857282839Disabling writes for close at 1731857282839Writing region 
close event to WAL at 1731857282840 (+1 ms)Closed at 1731857282841 (+1 ms) 2024-11-17T15:28:02,844 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T15:28:02,845 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-17T15:28:02,852 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-17T15:28:02,853 INFO [RS:0;7a780d55532c:33973 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-17T15:28:02,862 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T15:28:02,866 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-17T15:28:02,867 INFO [RS:0;7a780d55532c:33973 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-17T15:28:02,872 INFO [RS:0;7a780d55532c:33973 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-17T15:28:02,872 INFO [RS:0;7a780d55532c:33973 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T15:28:02,873 INFO [RS:0;7a780d55532c:33973 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-17T15:28:02,879 INFO [RS:0;7a780d55532c:33973 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-17T15:28:02,880 INFO [RS:0;7a780d55532c:33973 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-17T15:28:02,881 DEBUG [RS:0;7a780d55532c:33973 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:28:02,881 DEBUG [RS:0;7a780d55532c:33973 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:28:02,881 DEBUG [RS:0;7a780d55532c:33973 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:28:02,881 DEBUG [RS:0;7a780d55532c:33973 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:28:02,881 DEBUG [RS:0;7a780d55532c:33973 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:28:02,882 DEBUG [RS:0;7a780d55532c:33973 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7a780d55532c:0, corePoolSize=2, maxPoolSize=2 2024-11-17T15:28:02,882 DEBUG [RS:0;7a780d55532c:33973 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:28:02,882 DEBUG [RS:0;7a780d55532c:33973 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:28:02,882 DEBUG [RS:0;7a780d55532c:33973 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:28:02,882 DEBUG [RS:0;7a780d55532c:33973 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:28:02,882 DEBUG [RS:0;7a780d55532c:33973 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:28:02,882 DEBUG [RS:0;7a780d55532c:33973 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:28:02,883 DEBUG [RS:0;7a780d55532c:33973 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7a780d55532c:0, corePoolSize=3, maxPoolSize=3 2024-11-17T15:28:02,883 DEBUG [RS:0;7a780d55532c:33973 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0, corePoolSize=3, maxPoolSize=3 2024-11-17T15:28:02,884 INFO [RS:0;7a780d55532c:33973 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-17T15:28:02,884 INFO [RS:0;7a780d55532c:33973 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-17T15:28:02,884 INFO [RS:0;7a780d55532c:33973 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T15:28:02,884 INFO [RS:0;7a780d55532c:33973 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-17T15:28:02,884 INFO [RS:0;7a780d55532c:33973 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-17T15:28:02,884 INFO [RS:0;7a780d55532c:33973 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,33973,1731857281708-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T15:28:02,904 INFO [RS:0;7a780d55532c:33973 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-17T15:28:02,906 INFO [RS:0;7a780d55532c:33973 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,33973,1731857281708-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T15:28:02,907 INFO [RS:0;7a780d55532c:33973 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:28:02,907 INFO [RS:0;7a780d55532c:33973 {}] regionserver.Replication(171): 7a780d55532c,33973,1731857281708 started 2024-11-17T15:28:02,924 INFO [RS:0;7a780d55532c:33973 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:28:02,925 INFO [RS:0;7a780d55532c:33973 {}] regionserver.HRegionServer(1482): Serving as 7a780d55532c,33973,1731857281708, RpcServer on 7a780d55532c/172.17.0.2:33973, sessionid=0x101268ad6a90001 2024-11-17T15:28:02,926 DEBUG [RS:0;7a780d55532c:33973 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-17T15:28:02,926 DEBUG [RS:0;7a780d55532c:33973 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7a780d55532c,33973,1731857281708 2024-11-17T15:28:02,926 DEBUG [RS:0;7a780d55532c:33973 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a780d55532c,33973,1731857281708' 2024-11-17T15:28:02,926 DEBUG [RS:0;7a780d55532c:33973 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-17T15:28:02,927 DEBUG [RS:0;7a780d55532c:33973 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-17T15:28:02,928 DEBUG [RS:0;7a780d55532c:33973 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-17T15:28:02,928 DEBUG [RS:0;7a780d55532c:33973 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-17T15:28:02,928 DEBUG [RS:0;7a780d55532c:33973 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7a780d55532c,33973,1731857281708 2024-11-17T15:28:02,928 DEBUG [RS:0;7a780d55532c:33973 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a780d55532c,33973,1731857281708' 2024-11-17T15:28:02,928 DEBUG [RS:0;7a780d55532c:33973 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-17T15:28:02,929 DEBUG [RS:0;7a780d55532c:33973 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-17T15:28:02,929 DEBUG [RS:0;7a780d55532c:33973 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-17T15:28:02,929 INFO [RS:0;7a780d55532c:33973 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-17T15:28:02,929 INFO [RS:0;7a780d55532c:33973 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-17T15:28:03,017 WARN [7a780d55532c:34909 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-17T15:28:03,038 INFO [RS:0;7a780d55532c:33973 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a780d55532c%2C33973%2C1731857281708, suffix=, logDir=hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708, archiveDir=hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/oldWALs, maxLogs=32 2024-11-17T15:28:03,040 INFO [RS:0;7a780d55532c:33973 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C33973%2C1731857281708.1731857283040 2024-11-17T15:28:03,049 INFO [RS:0;7a780d55532c:33973 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.1731857283040 2024-11-17T15:28:03,050 DEBUG [RS:0;7a780d55532c:33973 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38403:38403),(127.0.0.1/127.0.0.1:40283:40283)] 2024-11-17T15:28:03,269 DEBUG [7a780d55532c:34909 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-17T15:28:03,282 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7a780d55532c,33973,1731857281708 2024-11-17T15:28:03,288 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7a780d55532c,33973,1731857281708, state=OPENING 2024-11-17T15:28:03,294 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-17T15:28:03,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34909-0x101268ad6a90000, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:28:03,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33973-0x101268ad6a90001, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:28:03,297 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T15:28:03,297 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T15:28:03,298 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T15:28:03,300 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7a780d55532c,33973,1731857281708}] 2024-11-17T15:28:03,476 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-17T15:28:03,479 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40803, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-17T15:28:03,490 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-17T15:28:03,490 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T15:28:03,493 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a780d55532c%2C33973%2C1731857281708.meta, suffix=.meta, logDir=hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708, archiveDir=hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/oldWALs, maxLogs=32 2024-11-17T15:28:03,495 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C33973%2C1731857281708.meta.1731857283495.meta 2024-11-17T15:28:03,503 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.meta.1731857283495.meta 2024-11-17T15:28:03,504 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40283:40283),(127.0.0.1/127.0.0.1:38403:38403)] 2024-11-17T15:28:03,506 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-17T15:28:03,508 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-17T15:28:03,511 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-17T15:28:03,516 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
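The meta region open above loads org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint because that coprocessor is declared in the hbase:meta table descriptor (HTD). As an illustrative sketch only (not part of this test run): the same attachment can be made on a user table through the standard client API. The table name, family name, and class name below are invented, and a cluster reachable via hbase-site.xml on the classpath is assumed.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorTableSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Hypothetical table; the coprocessor class is the one named in the log entry above.
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("ExampleTable"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
          .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
          .build();
      // The region server loads the endpoint when it opens the table's regions,
      // producing RegionCoprocessorHost "Loaded coprocessor ..." entries like the one above.
      admin.createTable(td);
    }
  }
}
```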
2024-11-17T15:28:03,520 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-17T15:28:03,521 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:28:03,521 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-17T15:28:03,521 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-17T15:28:03,524 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T15:28:03,526 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T15:28:03,526 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:28:03,527 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:28:03,527 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T15:28:03,529 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T15:28:03,529 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:28:03,529 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:28:03,529 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T15:28:03,531 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T15:28:03,531 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:28:03,531 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:28:03,532 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T15:28:03,533 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T15:28:03,533 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:28:03,534 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
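The CompactionConfiguration entries above repeat the same store-level tuning for every column family of 1588230740: minFilesToCompact=3, maxFilesToCompact=10, ratio 1.2, off-peak ratio 5.0, and a 604800000 ms major-compaction period with 0.5 jitter. A minimal sketch of where such values are usually supplied follows; the property names in the comments are assumptions based on commonly documented HBase keys and are not confirmed anywhere in this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed property names for the values printed by CompactionConfiguration above.
    conf.setInt("hbase.hstore.compaction.min", 3);                 // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);          // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);  // off-peak ratio
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);     // major period (ms)
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);   // major jitter
    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}
```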
2024-11-17T15:28:03,534 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T15:28:03,535 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/hbase/meta/1588230740 2024-11-17T15:28:03,538 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/hbase/meta/1588230740 2024-11-17T15:28:03,540 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T15:28:03,540 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T15:28:03,541 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-17T15:28:03,543 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T15:28:03,545 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=721853, jitterRate=-0.08211763203144073}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T15:28:03,545 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-17T15:28:03,547 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731857283522Writing region info on filesystem at 1731857283522Initializing all the Stores at 1731857283524 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857283524Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857283524Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857283524Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857283524Cleaning up temporary data from old regions at 1731857283540 (+16 ms)Running coprocessor post-open hooks at 1731857283545 (+5 ms)Region opened successfully at 1731857283547 (+2 ms) 2024-11-17T15:28:03,554 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731857283467 2024-11-17T15:28:03,565 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-17T15:28:03,566 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-17T15:28:03,568 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7a780d55532c,33973,1731857281708 2024-11-17T15:28:03,570 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7a780d55532c,33973,1731857281708, state=OPEN 2024-11-17T15:28:03,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34909-0x101268ad6a90000, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T15:28:03,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33973-0x101268ad6a90001, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T15:28:03,575 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T15:28:03,575 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T15:28:03,576 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7a780d55532c,33973,1731857281708 2024-11-17T15:28:03,580 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-17T15:28:03,581 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7a780d55532c,33973,1731857281708 in 276 msec 2024-11-17T15:28:03,587 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-17T15:28:03,587 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 731 msec 2024-11-17T15:28:03,589 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T15:28:03,589 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-17T15:28:03,609 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T15:28:03,610 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7a780d55532c,33973,1731857281708, seqNum=-1] 2024-11-17T15:28:03,631 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T15:28:03,633 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44629, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T15:28:03,652 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0310 sec 2024-11-17T15:28:03,653 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731857283653, completionTime=-1 2024-11-17T15:28:03,655 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-17T15:28:03,655 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-17T15:28:03,682 INFO [master/7a780d55532c:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-17T15:28:03,683 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731857343682 2024-11-17T15:28:03,683 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731857403683 2024-11-17T15:28:03,683 INFO [master/7a780d55532c:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 27 msec 2024-11-17T15:28:03,685 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,34909,1731857280918-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T15:28:03,686 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,34909,1731857280918-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:28:03,686 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,34909,1731857280918-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:28:03,687 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7a780d55532c:34909, period=300000, unit=MILLISECONDS is enabled. 
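InitMetaProcedure above creates the 'default' and 'hbase' namespaces as part of master startup. A quick way to confirm them from a client is to list the namespace descriptors through the Admin API, as in the sketch below (invented class name; a reachable cluster is assumed).

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ListNamespacesSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // After InitMetaProcedure finishes, this prints at least 'default' and 'hbase'.
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println(ns.getName());
      }
    }
  }
}
```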
2024-11-17T15:28:03,688 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-17T15:28:03,688 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-17T15:28:03,694 DEBUG [master/7a780d55532c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-17T15:28:03,716 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.837sec 2024-11-17T15:28:03,717 INFO [master/7a780d55532c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-17T15:28:03,718 INFO [master/7a780d55532c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-17T15:28:03,719 INFO [master/7a780d55532c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-17T15:28:03,720 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-17T15:28:03,720 INFO [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-17T15:28:03,721 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,34909,1731857280918-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T15:28:03,722 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,34909,1731857280918-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-17T15:28:03,731 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-17T15:28:03,731 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6072dac1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T15:28:03,732 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-17T15:28:03,732 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,34909,1731857280918-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
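The master reports that initialization completed here, and just below the test disables the load balancer (set balanceSwitch=false). A hedged sketch of the equivalent client-side call follows, using the Admin balancerSwitch API; the class name is invented and this is not claimed to be the test's own code.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class BalancerSwitchSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Turn the balancer off; the second argument asks the master to let any
      // balance run already in progress finish before returning.
      boolean wasEnabled = admin.balancerSwitch(false, true);
      System.out.println("balancer previously enabled: " + wasEnabled
          + ", enabled now: " + admin.isBalancerEnabled());
    }
  }
}
```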
2024-11-17T15:28:03,733 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-17T15:28:03,734 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-17T15:28:03,737 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7a780d55532c,34909,-1 for getting cluster id 2024-11-17T15:28:03,739 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-17T15:28:03,747 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6526ebff-589d-4563-8ba1-74a48d66d2de' 2024-11-17T15:28:03,750 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-17T15:28:03,750 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6526ebff-589d-4563-8ba1-74a48d66d2de" 2024-11-17T15:28:03,752 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e9c12a8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T15:28:03,752 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7a780d55532c,34909,-1] 2024-11-17T15:28:03,755 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-17T15:28:03,756 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:28:03,759 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43550, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-17T15:28:03,762 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@312e0b79, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T15:28:03,762 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T15:28:03,769 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7a780d55532c,33973,1731857281708, seqNum=-1] 2024-11-17T15:28:03,770 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T15:28:03,772 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53212, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T15:28:03,820 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7a780d55532c,34909,1731857280918 2024-11-17T15:28:03,821 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:28:03,840 INFO [Time-limited 
test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-17T15:28:03,845 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-17T15:28:03,855 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 7a780d55532c,34909,1731857280918 2024-11-17T15:28:03,858 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3c0d2759 2024-11-17T15:28:03,860 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-17T15:28:03,862 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43558, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-17T15:28:03,865 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34909 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-17T15:28:03,865 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34909 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-17T15:28:03,870 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34909 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T15:28:03,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34909 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-17T15:28:03,880 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-17T15:28:03,882 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34909 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-17T15:28:03,882 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:28:03,885 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-17T15:28:03,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34909 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-17T15:28:03,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39849 is added to blk_1073741835_1011 (size=389) 2024-11-17T15:28:03,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741835_1011 (size=389) 2024-11-17T15:28:03,928 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 2c52e0b374b469936fb4c18092738707, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d 2024-11-17T15:28:03,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741836_1012 (size=72) 2024-11-17T15:28:03,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741836_1012 (size=72) 2024-11-17T15:28:03,939 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:28:03,940 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 2c52e0b374b469936fb4c18092738707, disabling compactions & flushes 2024-11-17T15:28:03,940 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707. 2024-11-17T15:28:03,940 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707. 2024-11-17T15:28:03,940 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707. after waiting 0 ms 2024-11-17T15:28:03,940 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707. 2024-11-17T15:28:03,940 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707. 
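The two TableDescriptorChecker warnings above follow directly from the descriptor the test submits: MAX_FILESIZE=786432 and MEMSTORE_FLUSHSIZE=8192 are deliberately tiny so that flushes and log rolls happen quickly. Roughly the same create request could be issued from a client as sketched below; the values are taken from the log entries above, the class name is invented, and this is not the test's actual code.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateSmallTableSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)            // VERSIONS => '1' in the logged descriptor
              .build())
          .setMaxFileSize(786432L)          // triggers the MAX_FILESIZE warning above
          .setMemStoreFlushSize(8192L)      // triggers the MEMSTORE_FLUSHSIZE warning above
          .build();
      admin.createTable(td);
    }
  }
}
```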
2024-11-17T15:28:03,940 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 2c52e0b374b469936fb4c18092738707: Waiting for close lock at 1731857283940Disabling compacts and flushes for region at 1731857283940Disabling writes for close at 1731857283940Writing region close event to WAL at 1731857283940Closed at 1731857283940 2024-11-17T15:28:03,942 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-17T15:28:03,947 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731857283942"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731857283942"}]},"ts":"1731857283942"} 2024-11-17T15:28:03,953 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-17T15:28:03,955 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-17T15:28:03,958 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731857283955"}]},"ts":"1731857283955"} 2024-11-17T15:28:03,963 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-17T15:28:03,965 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=2c52e0b374b469936fb4c18092738707, ASSIGN}] 2024-11-17T15:28:03,968 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=2c52e0b374b469936fb4c18092738707, ASSIGN 2024-11-17T15:28:03,969 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=2c52e0b374b469936fb4c18092738707, ASSIGN; state=OFFLINE, location=7a780d55532c,33973,1731857281708; forceNewPlan=false, retain=false 2024-11-17T15:28:04,121 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=2c52e0b374b469936fb4c18092738707, regionState=OPENING, regionLocation=7a780d55532c,33973,1731857281708 2024-11-17T15:28:04,126 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=2c52e0b374b469936fb4c18092738707, ASSIGN because future has completed 2024-11-17T15:28:04,127 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, 
state=RUNNABLE, hasLock=false; OpenRegionProcedure 2c52e0b374b469936fb4c18092738707, server=7a780d55532c,33973,1731857281708}] 2024-11-17T15:28:04,288 INFO [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707. 2024-11-17T15:28:04,288 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 2c52e0b374b469936fb4c18092738707, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707.', STARTKEY => '', ENDKEY => ''} 2024-11-17T15:28:04,289 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 2c52e0b374b469936fb4c18092738707 2024-11-17T15:28:04,289 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:28:04,289 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 2c52e0b374b469936fb4c18092738707 2024-11-17T15:28:04,289 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 2c52e0b374b469936fb4c18092738707 2024-11-17T15:28:04,291 INFO [StoreOpener-2c52e0b374b469936fb4c18092738707-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 2c52e0b374b469936fb4c18092738707 2024-11-17T15:28:04,293 INFO [StoreOpener-2c52e0b374b469936fb4c18092738707-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c52e0b374b469936fb4c18092738707 columnFamilyName info 2024-11-17T15:28:04,293 DEBUG [StoreOpener-2c52e0b374b469936fb4c18092738707-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:28:04,294 INFO [StoreOpener-2c52e0b374b469936fb4c18092738707-1 {}] regionserver.HStore(327): Store=2c52e0b374b469936fb4c18092738707/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T15:28:04,295 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] 
regionserver.HRegion(1038): replaying wal for 2c52e0b374b469936fb4c18092738707 2024-11-17T15:28:04,296 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707 2024-11-17T15:28:04,296 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707 2024-11-17T15:28:04,297 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 2c52e0b374b469936fb4c18092738707 2024-11-17T15:28:04,297 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 2c52e0b374b469936fb4c18092738707 2024-11-17T15:28:04,299 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 2c52e0b374b469936fb4c18092738707 2024-11-17T15:28:04,303 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T15:28:04,303 INFO [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 2c52e0b374b469936fb4c18092738707; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=876094, jitterRate=0.11401195824146271}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-17T15:28:04,303 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2c52e0b374b469936fb4c18092738707 2024-11-17T15:28:04,304 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 2c52e0b374b469936fb4c18092738707: Running coprocessor pre-open hook at 1731857284289Writing region info on filesystem at 1731857284289Initializing all the Stores at 1731857284290 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857284291 (+1 ms)Cleaning up temporary data from old regions at 1731857284297 (+6 ms)Running coprocessor post-open hooks at 1731857284303 (+6 ms)Region opened successfully at 1731857284304 (+1 ms) 2024-11-17T15:28:04,306 INFO [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707., pid=6, 
masterSystemTime=1731857284281 2024-11-17T15:28:04,310 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707. 2024-11-17T15:28:04,310 INFO [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707. 2024-11-17T15:28:04,311 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=2c52e0b374b469936fb4c18092738707, regionState=OPEN, openSeqNum=2, regionLocation=7a780d55532c,33973,1731857281708 2024-11-17T15:28:04,315 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2c52e0b374b469936fb4c18092738707, server=7a780d55532c,33973,1731857281708 because future has completed 2024-11-17T15:28:04,321 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-17T15:28:04,321 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 2c52e0b374b469936fb4c18092738707, server=7a780d55532c,33973,1731857281708 in 190 msec 2024-11-17T15:28:04,326 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-17T15:28:04,326 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=2c52e0b374b469936fb4c18092738707, ASSIGN in 356 msec 2024-11-17T15:28:04,327 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-17T15:28:04,328 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731857284327"}]},"ts":"1731857284327"} 2024-11-17T15:28:04,331 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-17T15:28:04,333 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-17T15:28:04,336 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 460 msec 2024-11-17T15:28:09,025 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-17T15:28:09,067 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-17T15:28:09,068 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-17T15:28:11,380 DEBUG [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-17T15:28:11,380 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-17T15:28:11,382 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-17T15:28:11,382 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-17T15:28:11,383 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T15:28:11,383 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-17T15:28:11,383 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-17T15:28:11,383 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-17T15:28:13,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34909 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-17T15:28:13,937 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-17T15:28:13,940 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-17T15:28:13,948 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-17T15:28:13,949 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707. 
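Once the CREATE operation completes, the test scans META and finds a single region for TestLogRolling-testSlowSyncLogRolling. From a plain client the same information is available through a RegionLocator, as in this sketch (invented class name; standard client API assumed).

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLocationsSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(table)) {
      // For this test the table has one region spanning ('', ''), hosted on the lone region server.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getRegionNameAsString()
            + " -> " + loc.getServerName());
      }
    }
  }
}
```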
2024-11-17T15:28:13,950 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C33973%2C1731857281708.1731857293950 2024-11-17T15:28:13,959 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:28:13,959 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:28:13,960 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:28:13,960 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:28:13,960 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:28:13,960 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.1731857283040 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.1731857293950 2024-11-17T15:28:13,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741833_1009 (size=451) 2024-11-17T15:28:13,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741833_1009 (size=451) 2024-11-17T15:28:13,968 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38403:38403),(127.0.0.1/127.0.0.1:40283:40283)] 2024-11-17T15:28:13,969 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.1731857283040 to hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/oldWALs/7a780d55532c%2C33973%2C1731857281708.1731857283040 2024-11-17T15:28:13,978 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707., hostname=7a780d55532c,33973,1731857281708, seqNum=2] 2024-11-17T15:28:26,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33973 {}] regionserver.HRegion(8855): Flush requested on 2c52e0b374b469936fb4c18092738707 2024-11-17T15:28:26,013 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2c52e0b374b469936fb4c18092738707 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-17T15:28:26,080 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/.tmp/info/4adedf6446284f2088b301b50b92ab3d is 1080, key is row0001/info:/1731857293981/Put/seqid=0 2024-11-17T15:28:26,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741838_1014 (size=12509) 2024-11-17T15:28:26,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741838_1014 (size=12509) 2024-11-17T15:28:26,094 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/.tmp/info/4adedf6446284f2088b301b50b92ab3d 2024-11-17T15:28:26,144 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/.tmp/info/4adedf6446284f2088b301b50b92ab3d as hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/info/4adedf6446284f2088b301b50b92ab3d 2024-11-17T15:28:26,156 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/info/4adedf6446284f2088b301b50b92ab3d, entries=7, sequenceid=11, filesize=12.2 K 2024-11-17T15:28:26,165 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 2c52e0b374b469936fb4c18092738707 in 150ms, sequenceid=11, compaction requested=false 2024-11-17T15:28:26,166 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2c52e0b374b469936fb4c18092738707: 2024-11-17T15:28:29,959 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-17T15:28:34,024 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C33973%2C1731857281708.1731857314023 2024-11-17T15:28:34,235 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43077,DS-ab799247-4f1a-481a-89c6-78875ed51ffa,DISK], DatanodeInfoWithStorage[127.0.0.1:39849,DS-e4cd15d8-3f91-463e-9ba6-1b918a841126,DISK]] 2024-11-17T15:28:34,235 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:28:34,235 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:28:34,235 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:28:34,236 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:28:34,236 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:28:34,236 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.1731857293950 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.1731857314023 2024-11-17T15:28:34,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741837_1013 (size=12399) 2024-11-17T15:28:34,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741837_1013 (size=12399) 2024-11-17T15:28:34,244 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38403:38403),(127.0.0.1/127.0.0.1:40283:40283)] 
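The flush above writes the memstore to a temporary HFile under the region's .tmp directory and then commits it by moving it into the info store (the HRegionFileSystem "Committing ... as ..." line). Reduced to a hedged Hadoop FileSystem sketch with made-up local paths, this is the write-then-rename pattern, not HBase's own code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpThenCommit {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();               // local FS unless fs.defaultFS says otherwise
        FileSystem fs = FileSystem.get(conf);
        Path tmp = new Path("/tmp/store/.tmp/hfile-example");   // hypothetical stand-ins for the
        Path dst = new Path("/tmp/store/info/hfile-example");   // .tmp and info paths in the log
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.writeUTF("flushed cells would be written here");  // placeholder payload
        }
        fs.mkdirs(dst.getParent());
        boolean committed = fs.rename(tmp, dst);                // the commit step is a single rename
        System.out.println("committed=" + committed);
      }
    }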
2024-11-17T15:28:34,447 INFO [FSHLog-0-hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d-prefix:7a780d55532c,33973,1731857281708 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43077,DS-ab799247-4f1a-481a-89c6-78875ed51ffa,DISK], DatanodeInfoWithStorage[127.0.0.1:39849,DS-e4cd15d8-3f91-463e-9ba6-1b918a841126,DISK]] 2024-11-17T15:28:36,652 INFO [FSHLog-0-hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d-prefix:7a780d55532c,33973,1731857281708 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43077,DS-ab799247-4f1a-481a-89c6-78875ed51ffa,DISK], DatanodeInfoWithStorage[127.0.0.1:39849,DS-e4cd15d8-3f91-463e-9ba6-1b918a841126,DISK]] 2024-11-17T15:28:38,856 INFO [FSHLog-0-hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d-prefix:7a780d55532c,33973,1731857281708 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43077,DS-ab799247-4f1a-481a-89c6-78875ed51ffa,DISK], DatanodeInfoWithStorage[127.0.0.1:39849,DS-e4cd15d8-3f91-463e-9ba6-1b918a841126,DISK]] 2024-11-17T15:28:41,061 INFO [FSHLog-0-hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d-prefix:7a780d55532c,33973,1731857281708 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43077,DS-ab799247-4f1a-481a-89c6-78875ed51ffa,DISK], DatanodeInfoWithStorage[127.0.0.1:39849,DS-e4cd15d8-3f91-463e-9ba6-1b918a841126,DISK]] 2024-11-17T15:28:41,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33973 {}] regionserver.HRegion(8855): Flush requested on 2c52e0b374b469936fb4c18092738707 2024-11-17T15:28:41,061 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2c52e0b374b469936fb4c18092738707 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-17T15:28:41,263 INFO [FSHLog-0-hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d-prefix:7a780d55532c,33973,1731857281708 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43077,DS-ab799247-4f1a-481a-89c6-78875ed51ffa,DISK], DatanodeInfoWithStorage[127.0.0.1:39849,DS-e4cd15d8-3f91-463e-9ba6-1b918a841126,DISK]] 2024-11-17T15:28:41,269 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/.tmp/info/b6094fb992024ca883b8f7aae340050a is 1080, key is row0008/info:/1731857308012/Put/seqid=0 2024-11-17T15:28:41,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741840_1016 (size=12509) 2024-11-17T15:28:41,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741840_1016 (size=12509) 2024-11-17T15:28:41,278 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/.tmp/info/b6094fb992024ca883b8f7aae340050a 2024-11-17T15:28:41,287 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/.tmp/info/b6094fb992024ca883b8f7aae340050a as hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/info/b6094fb992024ca883b8f7aae340050a 2024-11-17T15:28:41,295 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/info/b6094fb992024ca883b8f7aae340050a, entries=7, sequenceid=21, filesize=12.2 K 2024-11-17T15:28:41,497 INFO [FSHLog-0-hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d-prefix:7a780d55532c,33973,1731857281708 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43077,DS-ab799247-4f1a-481a-89c6-78875ed51ffa,DISK], DatanodeInfoWithStorage[127.0.0.1:39849,DS-e4cd15d8-3f91-463e-9ba6-1b918a841126,DISK]] 2024-11-17T15:28:41,497 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 2c52e0b374b469936fb4c18092738707 in 436ms, sequenceid=21, compaction requested=false 2024-11-17T15:28:41,497 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2c52e0b374b469936fb4c18092738707: 2024-11-17T15:28:41,498 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-17T15:28:41,498 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T15:28:41,499 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/info/4adedf6446284f2088b301b50b92ab3d because midkey is the same as first or last row 2024-11-17T15:28:43,265 INFO [FSHLog-0-hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d-prefix:7a780d55532c,33973,1731857281708 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43077,DS-ab799247-4f1a-481a-89c6-78875ed51ffa,DISK], DatanodeInfoWithStorage[127.0.0.1:39849,DS-e4cd15d8-3f91-463e-9ba6-1b918a841126,DISK]] 2024-11-17T15:28:43,766 INFO [master/7a780d55532c:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-17T15:28:43,766 INFO [master/7a780d55532c:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
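Those last DEBUG lines are the split check: the store now totals 24.4 K against a 16.0 K split size, so the size policy says "should split", but the split is abandoned because the store file's midkey equals its first or last row, leaving no usable split point. The size test itself is just a comparison; a toy version in the spirit of the check (not the actual ConstantSizeRegionSplitPolicy code) looks like:

    // Toy size check: a region becomes a split candidate once the summed store
    // size exceeds the configured split size (16 K in this test's shrunken config).
    static boolean sizeSaysSplit(long sumStoreSizeBytes, long splitSizeBytes) {
      return sumStoreSizeBytes > splitSizeBytes;   // 24.4 K > 16.0 K in the log above
    }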
2024-11-17T15:28:45,469 INFO [FSHLog-0-hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d-prefix:7a780d55532c,33973,1731857281708 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43077,DS-ab799247-4f1a-481a-89c6-78875ed51ffa,DISK], DatanodeInfoWithStorage[127.0.0.1:39849,DS-e4cd15d8-3f91-463e-9ba6-1b918a841126,DISK]] 2024-11-17T15:28:45,471 WARN [FSHLog-0-hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d-prefix:7a780d55532c,33973,1731857281708 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43077,DS-ab799247-4f1a-481a-89c6-78875ed51ffa,DISK], DatanodeInfoWithStorage[127.0.0.1:39849,DS-e4cd15d8-3f91-463e-9ba6-1b918a841126,DISK]] 2024-11-17T15:28:45,472 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7a780d55532c%2C33973%2C1731857281708:(num 1731857314023) roll requested 2024-11-17T15:28:45,472 INFO [regionserver/7a780d55532c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C33973%2C1731857281708.1731857325472 2024-11-17T15:28:45,680 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43077,DS-ab799247-4f1a-481a-89c6-78875ed51ffa,DISK], DatanodeInfoWithStorage[127.0.0.1:39849,DS-e4cd15d8-3f91-463e-9ba6-1b918a841126,DISK]] 2024-11-17T15:28:45,680 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:28:45,680 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:28:45,681 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:28:45,681 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:28:45,681 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:28:45,681 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.1731857314023 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.1731857325472 2024-11-17T15:28:45,682 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40283:40283),(127.0.0.1/127.0.0.1:38403:38403)] 2024-11-17T15:28:45,682 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.1731857314023 is not closed yet, will try archiving it next time 2024-11-17T15:28:45,682 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.1731857293950 to hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/oldWALs/7a780d55532c%2C33973%2C1731857281708.1731857293950 2024-11-17T15:28:45,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741839_1015 (size=7739) 2024-11-17T15:28:45,684 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741839_1015 (size=7739) 2024-11-17T15:28:47,674 INFO [FSHLog-0-hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d-prefix:7a780d55532c,33973,1731857281708 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39849,DS-e4cd15d8-3f91-463e-9ba6-1b918a841126,DISK], DatanodeInfoWithStorage[127.0.0.1:43077,DS-ab799247-4f1a-481a-89c6-78875ed51ffa,DISK]] 2024-11-17T15:28:49,289 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 2c52e0b374b469936fb4c18092738707, had cached 0 bytes from a total of 25018 2024-11-17T15:28:49,878 INFO [FSHLog-0-hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d-prefix:7a780d55532c,33973,1731857281708 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39849,DS-e4cd15d8-3f91-463e-9ba6-1b918a841126,DISK], DatanodeInfoWithStorage[127.0.0.1:43077,DS-ab799247-4f1a-481a-89c6-78875ed51ffa,DISK]] 2024-11-17T15:28:52,082 INFO [FSHLog-0-hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d-prefix:7a780d55532c,33973,1731857281708 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39849,DS-e4cd15d8-3f91-463e-9ba6-1b918a841126,DISK], DatanodeInfoWithStorage[127.0.0.1:43077,DS-ab799247-4f1a-481a-89c6-78875ed51ffa,DISK]] 2024-11-17T15:28:54,286 INFO [FSHLog-0-hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d-prefix:7a780d55532c,33973,1731857281708 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39849,DS-e4cd15d8-3f91-463e-9ba6-1b918a841126,DISK], DatanodeInfoWithStorage[127.0.0.1:43077,DS-ab799247-4f1a-481a-89c6-78875ed51ffa,DISK]] 2024-11-17T15:28:56,288 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-17T15:28:56,289 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C33973%2C1731857281708.1731857336288 2024-11-17T15:28:59,959 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
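The roll requests in this stretch come from two slow-sync triggers visible in the WARNs: too many syncs above the "slow" latency (count=8 over a threshold of 5), and further down a single sync above the 5000 ms roll threshold. A hedged sketch of the counting variant follows; it is illustrative only, not FSHLog's code, and the 100 ms "slow" latency is an assumption about the test's tuned config:

    // Count syncs slower than slowSyncMs; once more than rollThreshold of them
    // have been seen since the last roll, tell the caller to request a WAL roll.
    class SlowSyncTracker {
      private final long slowSyncMs;     // assumed ~100 ms in this test
      private final int rollThreshold;   // 5, as printed in the WARN above
      private int slowCount;

      SlowSyncTracker(long slowSyncMs, int rollThreshold) {
        this.slowSyncMs = slowSyncMs;
        this.rollThreshold = rollThreshold;
      }

      boolean onSyncCompleted(long syncCostMs) {
        if (syncCostMs > slowSyncMs) {
          slowCount++;                   // the ~201-206 ms syncs above each count as slow
        }
        return slowCount > rollThreshold;
      }

      void onRoll() {
        slowCount = 0;                   // a successful roll resets the window
      }
    }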
2024-11-17T15:29:01,298 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39849,DS-e4cd15d8-3f91-463e-9ba6-1b918a841126,DISK], DatanodeInfoWithStorage[127.0.0.1:43077,DS-ab799247-4f1a-481a-89c6-78875ed51ffa,DISK]] 2024-11-17T15:29:01,300 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39849,DS-e4cd15d8-3f91-463e-9ba6-1b918a841126,DISK], DatanodeInfoWithStorage[127.0.0.1:43077,DS-ab799247-4f1a-481a-89c6-78875ed51ffa,DISK]] 2024-11-17T15:29:01,300 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7a780d55532c%2C33973%2C1731857281708:(num 1731857336288) roll requested 2024-11-17T15:29:01,300 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:01,300 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:01,300 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:01,300 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:01,301 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:01,301 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.1731857325472 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.1731857336288 2024-11-17T15:29:01,302 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40283:40283),(127.0.0.1/127.0.0.1:38403:38403)] 2024-11-17T15:29:01,302 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.1731857325472 is not closed yet, will try archiving it next time 2024-11-17T15:29:01,302 INFO [regionserver/7a780d55532c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C33973%2C1731857281708.1731857341302 2024-11-17T15:29:01,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741841_1017 (size=4753) 2024-11-17T15:29:01,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741841_1017 (size=4753) 2024-11-17T15:29:06,305 INFO [FSHLog-0-hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d-prefix:7a780d55532c,33973,1731857281708 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39849,DS-e4cd15d8-3f91-463e-9ba6-1b918a841126,DISK], DatanodeInfoWithStorage[127.0.0.1:43077,DS-ab799247-4f1a-481a-89c6-78875ed51ffa,DISK]] 2024-11-17T15:29:06,305 WARN [FSHLog-0-hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d-prefix:7a780d55532c,33973,1731857281708 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39849,DS-e4cd15d8-3f91-463e-9ba6-1b918a841126,DISK], 
DatanodeInfoWithStorage[127.0.0.1:43077,DS-ab799247-4f1a-481a-89c6-78875ed51ffa,DISK]] 2024-11-17T15:29:06,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33973 {}] regionserver.HRegion(8855): Flush requested on 2c52e0b374b469936fb4c18092738707 2024-11-17T15:29:06,306 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2c52e0b374b469936fb4c18092738707 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-17T15:29:06,311 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39849,DS-e4cd15d8-3f91-463e-9ba6-1b918a841126,DISK], DatanodeInfoWithStorage[127.0.0.1:43077,DS-ab799247-4f1a-481a-89c6-78875ed51ffa,DISK]] 2024-11-17T15:29:06,311 WARN [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39849,DS-e4cd15d8-3f91-463e-9ba6-1b918a841126,DISK], DatanodeInfoWithStorage[127.0.0.1:43077,DS-ab799247-4f1a-481a-89c6-78875ed51ffa,DISK]] 2024-11-17T15:29:08,307 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-17T15:29:11,308 INFO [FSHLog-0-hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d-prefix:7a780d55532c,33973,1731857281708 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39849,DS-e4cd15d8-3f91-463e-9ba6-1b918a841126,DISK], DatanodeInfoWithStorage[127.0.0.1:43077,DS-ab799247-4f1a-481a-89c6-78875ed51ffa,DISK]] 2024-11-17T15:29:11,308 WARN [FSHLog-0-hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d-prefix:7a780d55532c,33973,1731857281708 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39849,DS-e4cd15d8-3f91-463e-9ba6-1b918a841126,DISK], DatanodeInfoWithStorage[127.0.0.1:43077,DS-ab799247-4f1a-481a-89c6-78875ed51ffa,DISK]] 2024-11-17T15:29:11,308 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:11,308 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:11,309 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:11,309 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:11,309 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:11,309 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.1731857336288 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.1731857341302 2024-11-17T15:29:11,311 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38403:38403),(127.0.0.1/127.0.0.1:40283:40283)] 2024-11-17T15:29:11,311 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.1731857336288 is not closed yet, will try archiving it next time 2024-11-17T15:29:11,311 
DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7a780d55532c%2C33973%2C1731857281708:(num 1731857341302) roll requested 2024-11-17T15:29:11,311 INFO [regionserver/7a780d55532c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C33973%2C1731857281708.1731857351311 2024-11-17T15:29:11,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741842_1018 (size=1569) 2024-11-17T15:29:11,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741842_1018 (size=1569) 2024-11-17T15:29:11,316 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/.tmp/info/cbc7d7fa61ca4cd3bdc626cd2d576f19 is 1080, key is row0015/info:/1731857323063/Put/seqid=0 2024-11-17T15:29:11,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741844_1020 (size=12509) 2024-11-17T15:29:11,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741844_1020 (size=12509) 2024-11-17T15:29:11,323 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/.tmp/info/cbc7d7fa61ca4cd3bdc626cd2d576f19 2024-11-17T15:29:11,334 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/.tmp/info/cbc7d7fa61ca4cd3bdc626cd2d576f19 as hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/info/cbc7d7fa61ca4cd3bdc626cd2d576f19 2024-11-17T15:29:11,344 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/info/cbc7d7fa61ca4cd3bdc626cd2d576f19, entries=7, sequenceid=31, filesize=12.2 K 2024-11-17T15:29:16,320 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43077,DS-ab799247-4f1a-481a-89c6-78875ed51ffa,DISK], DatanodeInfoWithStorage[127.0.0.1:39849,DS-e4cd15d8-3f91-463e-9ba6-1b918a841126,DISK]] 2024-11-17T15:29:16,320 WARN [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43077,DS-ab799247-4f1a-481a-89c6-78875ed51ffa,DISK], DatanodeInfoWithStorage[127.0.0.1:39849,DS-e4cd15d8-3f91-463e-9ba6-1b918a841126,DISK]] 2024-11-17T15:29:16,346 INFO [FSHLog-0-hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d-prefix:7a780d55532c,33973,1731857281708 {}] wal.AbstractFSWAL(1368): 
Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43077,DS-ab799247-4f1a-481a-89c6-78875ed51ffa,DISK], DatanodeInfoWithStorage[127.0.0.1:39849,DS-e4cd15d8-3f91-463e-9ba6-1b918a841126,DISK]] 2024-11-17T15:29:16,346 WARN [FSHLog-0-hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d-prefix:7a780d55532c,33973,1731857281708 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43077,DS-ab799247-4f1a-481a-89c6-78875ed51ffa,DISK], DatanodeInfoWithStorage[127.0.0.1:39849,DS-e4cd15d8-3f91-463e-9ba6-1b918a841126,DISK]] 2024-11-17T15:29:16,346 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 2c52e0b374b469936fb4c18092738707 in 10040ms, sequenceid=31, compaction requested=true 2024-11-17T15:29:16,346 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2c52e0b374b469936fb4c18092738707: 2024-11-17T15:29:16,346 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:16,346 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-17T15:29:16,346 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:16,346 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T15:29:16,346 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/info/4adedf6446284f2088b301b50b92ab3d because midkey is the same as first or last row 2024-11-17T15:29:16,346 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:16,346 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:16,347 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:16,347 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.1731857341302 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.1731857351311 2024-11-17T15:29:16,348 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38403:38403),(127.0.0.1/127.0.0.1:40283:40283)] 2024-11-17T15:29:16,348 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.1731857341302 is not closed yet, will try archiving it next time 2024-11-17T15:29:16,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c52e0b374b469936fb4c18092738707:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T15:29:16,348 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.1731857314023 to hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/oldWALs/7a780d55532c%2C33973%2C1731857281708.1731857314023 2024-11-17T15:29:16,348 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7a780d55532c%2C33973%2C1731857281708:(num 1731857351311) roll requested 2024-11-17T15:29:16,349 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C33973%2C1731857281708.1731857356348 2024-11-17T15:29:16,351 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.1731857325472 to hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/oldWALs/7a780d55532c%2C33973%2C1731857281708.1731857325472 2024-11-17T15:29:16,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T15:29:16,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741843_1019 (size=438) 2024-11-17T15:29:16,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741843_1019 (size=438) 2024-11-17T15:29:16,353 DEBUG [RS:0;7a780d55532c:33973-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T15:29:16,353 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.1731857336288 to hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/oldWALs/7a780d55532c%2C33973%2C1731857281708.1731857336288 2024-11-17T15:29:16,356 DEBUG [RS:0;7a780d55532c:33973-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T15:29:16,358 DEBUG [RS:0;7a780d55532c:33973-shortCompactions-0 {}] regionserver.HStore(1541): 2c52e0b374b469936fb4c18092738707/info is initiating minor compaction (all files) 2024-11-17T15:29:16,359 INFO [RS:0;7a780d55532c:33973-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 2c52e0b374b469936fb4c18092738707/info in TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707. 
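The compaction thread has just picked all three 12.2 K flush files (37527 bytes total) for a minor compaction. The "in ratio" wording refers to the selection rule that no file in the chosen set may be much larger than the rest combined; a toy check in that spirit (an approximation, not ExploringCompactionPolicy itself; 1.2 is the usual default ratio and is assumed here):

    // A candidate set is "in ratio" when every file is at most ratio times the
    // combined size of the other files in the set.
    static boolean inRatio(long[] fileSizes, double ratio) {
      long total = 0;
      for (long size : fileSizes) {
        total += size;
      }
      for (long size : fileSizes) {
        if (size > ratio * (total - size)) {
          return false;
        }
      }
      return true;
    }

    // The three flush files above: 12509 + 12509 + 12509 = 37527 bytes. With
    // ratio 1.2 each file is well under 1.2 * 25018, so all three are selected
    // and rewritten into the single ~27.1 K file the compaction produces below.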
2024-11-17T15:29:16,359 INFO [RS:0;7a780d55532c:33973-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/info/4adedf6446284f2088b301b50b92ab3d, hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/info/b6094fb992024ca883b8f7aae340050a, hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/info/cbc7d7fa61ca4cd3bdc626cd2d576f19] into tmpdir=hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/.tmp, totalSize=36.6 K 2024-11-17T15:29:16,361 DEBUG [RS:0;7a780d55532c:33973-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4adedf6446284f2088b301b50b92ab3d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731857293981 2024-11-17T15:29:16,361 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:16,361 DEBUG [RS:0;7a780d55532c:33973-shortCompactions-0 {}] compactions.Compactor(225): Compacting b6094fb992024ca883b8f7aae340050a, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731857308012 2024-11-17T15:29:16,362 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:16,362 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:16,362 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:16,362 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:16,362 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.1731857351311 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.1731857356348 2024-11-17T15:29:16,362 DEBUG [RS:0;7a780d55532c:33973-shortCompactions-0 {}] compactions.Compactor(225): Compacting cbc7d7fa61ca4cd3bdc626cd2d576f19, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731857323063 2024-11-17T15:29:16,363 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40283:40283),(127.0.0.1/127.0.0.1:38403:38403)] 2024-11-17T15:29:16,363 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.1731857341302 is not closed yet, will try archiving it next time 2024-11-17T15:29:16,363 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.1731857351311 is not closed yet, will try archiving it next time 2024-11-17T15:29:16,364 INFO [regionserver/7a780d55532c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C33973%2C1731857281708.1731857356363 
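Rolled-out WAL files keep moving from the server's WALs directory into oldWALs, as the WAL-Archive lines show. A small sketch that lists what has accumulated there, reusing the test's throwaway namenode address and data directory from the log (swap both for a real cluster):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListOldWals {
      public static void main(String[] args) throws Exception {
        Path oldWals = new Path("hdfs://localhost:39521/user/jenkins/test-data/"
            + "769c6738-bcb9-0289-79d6-cf6f3f2ad69d/oldWALs");
        FileSystem fs = oldWals.getFileSystem(new Configuration());
        for (FileStatus status : fs.listStatus(oldWals)) {
          // File names encode server name, port, start code and WAL creation time.
          System.out.println(status.getPath().getName() + "\t" + status.getLen() + " bytes");
        }
      }
    }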
2024-11-17T15:29:16,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741845_1021 (size=93) 2024-11-17T15:29:16,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741845_1021 (size=93) 2024-11-17T15:29:16,367 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.1731857341302 is not closed yet, will try archiving it next time 2024-11-17T15:29:16,367 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.1731857351311 to hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/oldWALs/7a780d55532c%2C33973%2C1731857281708.1731857351311 2024-11-17T15:29:16,382 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:16,382 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:16,382 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:16,382 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:16,382 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:16,383 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.1731857356348 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.1731857356363 2024-11-17T15:29:16,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741846_1022 (size=1258) 2024-11-17T15:29:16,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741846_1022 (size=1258) 2024-11-17T15:29:16,386 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.1731857341302 is not closed yet, will try archiving it next time 2024-11-17T15:29:16,393 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38403:38403),(127.0.0.1/127.0.0.1:40283:40283)] 2024-11-17T15:29:16,393 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.1731857341302 is not closed yet, will try archiving it next time 2024-11-17T15:29:16,401 INFO [RS:0;7a780d55532c:33973-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c52e0b374b469936fb4c18092738707#info#compaction#3 average throughput is 21.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T15:29:16,402 DEBUG [RS:0;7a780d55532c:33973-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/.tmp/info/641c8b75fe8a4440a65553bc2f5de475 is 1080, key is row0001/info:/1731857293981/Put/seqid=0 2024-11-17T15:29:16,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741848_1024 (size=27710) 2024-11-17T15:29:16,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741848_1024 (size=27710) 2024-11-17T15:29:16,425 DEBUG [RS:0;7a780d55532c:33973-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/.tmp/info/641c8b75fe8a4440a65553bc2f5de475 as hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/info/641c8b75fe8a4440a65553bc2f5de475 2024-11-17T15:29:16,442 INFO [RS:0;7a780d55532c:33973-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 2c52e0b374b469936fb4c18092738707/info of 2c52e0b374b469936fb4c18092738707 into 641c8b75fe8a4440a65553bc2f5de475(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T15:29:16,442 DEBUG [RS:0;7a780d55532c:33973-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 2c52e0b374b469936fb4c18092738707: 2024-11-17T15:29:16,445 INFO [RS:0;7a780d55532c:33973-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707., storeName=2c52e0b374b469936fb4c18092738707/info, priority=13, startTime=1731857356348; duration=0sec 2024-11-17T15:29:16,445 DEBUG [RS:0;7a780d55532c:33973-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-17T15:29:16,445 DEBUG [RS:0;7a780d55532c:33973-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T15:29:16,445 DEBUG [RS:0;7a780d55532c:33973-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/info/641c8b75fe8a4440a65553bc2f5de475 because midkey is the same as first or last row 2024-11-17T15:29:16,446 DEBUG [RS:0;7a780d55532c:33973-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-17T15:29:16,446 DEBUG [RS:0;7a780d55532c:33973-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T15:29:16,446 DEBUG [RS:0;7a780d55532c:33973-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/info/641c8b75fe8a4440a65553bc2f5de475 because midkey is the same as first or last row 2024-11-17T15:29:16,446 DEBUG [RS:0;7a780d55532c:33973-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-17T15:29:16,446 DEBUG [RS:0;7a780d55532c:33973-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T15:29:16,446 DEBUG [RS:0;7a780d55532c:33973-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/info/641c8b75fe8a4440a65553bc2f5de475 because midkey is the same as first or last row 2024-11-17T15:29:16,446 DEBUG [RS:0;7a780d55532c:33973-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T15:29:16,447 DEBUG [RS:0;7a780d55532c:33973-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c52e0b374b469936fb4c18092738707:info 2024-11-17T15:29:16,753 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/WALs/7a780d55532c,33973,1731857281708/7a780d55532c%2C33973%2C1731857281708.1731857341302 to hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/oldWALs/7a780d55532c%2C33973%2C1731857281708.1731857341302 2024-11-17T15:29:28,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33973 {}] regionserver.HRegion(8855): Flush requested on 2c52e0b374b469936fb4c18092738707 2024-11-17T15:29:28,395 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2c52e0b374b469936fb4c18092738707 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-17T15:29:28,401 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/.tmp/info/1e2244319c974959bd15de5b9898be1b is 1080, key is row0022/info:/1731857356365/Put/seqid=0 2024-11-17T15:29:28,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741849_1025 (size=12509) 2024-11-17T15:29:28,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741849_1025 (size=12509) 2024-11-17T15:29:28,409 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/.tmp/info/1e2244319c974959bd15de5b9898be1b 2024-11-17T15:29:28,419 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/.tmp/info/1e2244319c974959bd15de5b9898be1b as hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/info/1e2244319c974959bd15de5b9898be1b 2024-11-17T15:29:28,427 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/info/1e2244319c974959bd15de5b9898be1b, entries=7, sequenceid=42, filesize=12.2 K 2024-11-17T15:29:28,428 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 2c52e0b374b469936fb4c18092738707 in 34ms, sequenceid=42, compaction requested=false 2024-11-17T15:29:28,428 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2c52e0b374b469936fb4c18092738707: 2024-11-17T15:29:28,429 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-17T15:29:28,429 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T15:29:28,429 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/info/641c8b75fe8a4440a65553bc2f5de475 because midkey is the same as first or last row 2024-11-17T15:29:29,960 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-17T15:29:34,289 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 2c52e0b374b469936fb4c18092738707, had cached 0 bytes from a total of 40219 2024-11-17T15:29:36,410 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-17T15:29:36,411 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-17T15:29:36,411 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T15:29:36,416 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:29:36,416 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:29:36,416 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
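The call stack above is the JUnit tearDown of AbstractTestLogRolling driving HBaseTestingUtil.shutdownMiniCluster(), which produces the shutdown chatter that follows. Roughly, the teardown looks like the sketch below; the TEST_UTIL field name is a common convention assumed here rather than taken from this log:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    public class ExampleLogRollingTest {
      private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @After
      public void tearDown() throws Exception {
        // Closes the cluster connection, stops the mini HBase cluster, then the
        // backing mini DFS/ZooKeeper clusters, as traced in the log above.
        TEST_UTIL.shutdownMiniCluster();
      }
    }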
2024-11-17T15:29:36,416 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-17T15:29:36,416 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=131935351, stopped=false 2024-11-17T15:29:36,417 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7a780d55532c,34909,1731857280918 2024-11-17T15:29:36,424 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34909-0x101268ad6a90000, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T15:29:36,424 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33973-0x101268ad6a90001, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T15:29:36,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34909-0x101268ad6a90000, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:29:36,425 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T15:29:36,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33973-0x101268ad6a90001, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:29:36,425 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-17T15:29:36,425 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T15:29:36,425 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:29:36,425 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34909-0x101268ad6a90000, quorum=127.0.0.1:56154, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T15:29:36,425 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33973-0x101268ad6a90001, quorum=127.0.0.1:56154, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T15:29:36,426 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7a780d55532c,33973,1731857281708' ***** 2024-11-17T15:29:36,426 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-17T15:29:36,426 INFO [RS:0;7a780d55532c:33973 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-17T15:29:36,426 INFO [RS:0;7a780d55532c:33973 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-17T15:29:36,426 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-17T15:29:36,426 INFO [RS:0;7a780d55532c:33973 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-17T15:29:36,427 INFO [RS:0;7a780d55532c:33973 {}] regionserver.HRegionServer(3091): Received CLOSE for 2c52e0b374b469936fb4c18092738707 2024-11-17T15:29:36,427 INFO [RS:0;7a780d55532c:33973 {}] regionserver.HRegionServer(959): stopping server 7a780d55532c,33973,1731857281708 2024-11-17T15:29:36,427 INFO [RS:0;7a780d55532c:33973 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T15:29:36,427 INFO [RS:0;7a780d55532c:33973 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7a780d55532c:33973. 
2024-11-17T15:29:36,427 DEBUG [RS:0;7a780d55532c:33973 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T15:29:36,427 DEBUG [RS:0;7a780d55532c:33973 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:29:36,428 INFO [RS:0;7a780d55532c:33973 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-17T15:29:36,428 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 2c52e0b374b469936fb4c18092738707, disabling compactions & flushes 2024-11-17T15:29:36,428 INFO [RS:0;7a780d55532c:33973 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-17T15:29:36,428 INFO [RS:0;7a780d55532c:33973 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-17T15:29:36,428 INFO [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707. 2024-11-17T15:29:36,428 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707. 2024-11-17T15:29:36,428 INFO [RS:0;7a780d55532c:33973 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-17T15:29:36,428 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707. after waiting 0 ms 2024-11-17T15:29:36,428 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707. 
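
"Connection has been closed" is logged twice above, once by the master and once by the region server, each time with a stack trace through AsyncConnectionImpl.close(). The same close path is reached whenever an AsyncConnection is closed explicitly; a minimal client-side sketch using the standard HBase async client API (the table and row names are the ones appearing in this log, but the program itself is illustrative):

```java
// Explicitly closing an AsyncConnection, which is what triggers the
// AsyncConnectionImpl.close() stack traces recorded above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.util.Bytes;

public class AsyncConnectionCloseSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // try-with-resources guarantees close() runs, logging "Connection has been closed by ..."
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      conn.getTable(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
          .get(new Get(Bytes.toBytes("row0029")))
          .get();
    }
  }
}
```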
2024-11-17T15:29:36,428 INFO [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 2c52e0b374b469936fb4c18092738707 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-17T15:29:36,428 INFO [RS:0;7a780d55532c:33973 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-17T15:29:36,428 DEBUG [RS:0;7a780d55532c:33973 {}] regionserver.HRegionServer(1325): Online Regions={2c52e0b374b469936fb4c18092738707=TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707., 1588230740=hbase:meta,,1.1588230740} 2024-11-17T15:29:36,428 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T15:29:36,429 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T15:29:36,429 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T15:29:36,429 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T15:29:36,429 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T15:29:36,429 DEBUG [RS:0;7a780d55532c:33973 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 2c52e0b374b469936fb4c18092738707 2024-11-17T15:29:36,429 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-17T15:29:36,438 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/.tmp/info/583f3642de914b92a7170a9b8237963e is 1080, key is row0029/info:/1731857370397/Put/seqid=0 2024-11-17T15:29:36,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741850_1026 (size=8193) 2024-11-17T15:29:36,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741850_1026 (size=8193) 2024-11-17T15:29:36,451 INFO [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/.tmp/info/583f3642de914b92a7170a9b8237963e 2024-11-17T15:29:36,456 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/hbase/meta/1588230740/.tmp/info/46455bd28d404eab8e3950d818de395d is 195, key is 
TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707./info:regioninfo/1731857284311/Put/seqid=0 2024-11-17T15:29:36,463 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/.tmp/info/583f3642de914b92a7170a9b8237963e as hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/info/583f3642de914b92a7170a9b8237963e 2024-11-17T15:29:36,473 INFO [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/info/583f3642de914b92a7170a9b8237963e, entries=3, sequenceid=48, filesize=8.0 K 2024-11-17T15:29:36,475 INFO [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 2c52e0b374b469936fb4c18092738707 in 46ms, sequenceid=48, compaction requested=true 2024-11-17T15:29:36,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741851_1027 (size=7016) 2024-11-17T15:29:36,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741851_1027 (size=7016) 2024-11-17T15:29:36,476 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/hbase/meta/1588230740/.tmp/info/46455bd28d404eab8e3950d818de395d 2024-11-17T15:29:36,476 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/info/4adedf6446284f2088b301b50b92ab3d, hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/info/b6094fb992024ca883b8f7aae340050a, hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/info/cbc7d7fa61ca4cd3bdc626cd2d576f19] to archive 2024-11-17T15:29:36,479 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
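
The StoreCloser records above list three compacted store files being moved out of the region's info family, and the records that follow show each one landing under archive/data/... with the same relative path. The effect is a filesystem rename that mirrors the data/ layout under archive/; a hedged illustration with plain Hadoop FileSystem calls (not the HFileArchiver implementation; the root directory and layout arguments are illustrative):

```java
// Illustration of the "to archive" effect logged around here: the same
// data/<table>/<region>/<family>/<file> path is recreated under archive/.
// Not the actual HFileArchiver code; paths are illustrative.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveStoreFileSketch {
  public static void archive(Configuration conf, Path rootDir, String relativeStoreFile)
      throws java.io.IOException {
    FileSystem fs = rootDir.getFileSystem(conf);
    Path source = new Path(new Path(rootDir, "data"), relativeStoreFile);
    Path target = new Path(new Path(rootDir, "archive/data"), relativeStoreFile);
    fs.mkdirs(target.getParent());      // make sure the archive family directory exists
    if (!fs.rename(source, target)) {   // a metadata-only move on HDFS
      throw new java.io.IOException("Failed to archive " + source + " to " + target);
    }
  }
}
```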
2024-11-17T15:29:36,483 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/info/4adedf6446284f2088b301b50b92ab3d to hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/archive/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/info/4adedf6446284f2088b301b50b92ab3d 2024-11-17T15:29:36,485 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/info/b6094fb992024ca883b8f7aae340050a to hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/archive/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/info/b6094fb992024ca883b8f7aae340050a 2024-11-17T15:29:36,487 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/info/cbc7d7fa61ca4cd3bdc626cd2d576f19 to hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/archive/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/info/cbc7d7fa61ca4cd3bdc626cd2d576f19 2024-11-17T15:29:36,502 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=7a780d55532c:34909 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-17T15:29:36,508 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/hbase/meta/1588230740/.tmp/ns/0bb9e011af4948b6b063037bbd469b63 is 43, key is default/ns:d/1731857283637/Put/seqid=0 2024-11-17T15:29:36,508 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [4adedf6446284f2088b301b50b92ab3d=12509, b6094fb992024ca883b8f7aae340050a=12509, cbc7d7fa61ca4cd3bdc626cd2d576f19=12509] 2024-11-17T15:29:36,514 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/default/TestLogRolling-testSlowSyncLogRolling/2c52e0b374b469936fb4c18092738707/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-17T15:29:36,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741852_1028 (size=5153) 2024-11-17T15:29:36,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741852_1028 (size=5153) 2024-11-17T15:29:36,517 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/hbase/meta/1588230740/.tmp/ns/0bb9e011af4948b6b063037bbd469b63 2024-11-17T15:29:36,518 INFO [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707. 2024-11-17T15:29:36,518 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 2c52e0b374b469936fb4c18092738707: Waiting for close lock at 1731857376427Running coprocessor pre-close hooks at 1731857376428 (+1 ms)Disabling compacts and flushes for region at 1731857376428Disabling writes for close at 1731857376428Obtaining lock to block concurrent updates at 1731857376428Preparing flush snapshotting stores in 2c52e0b374b469936fb4c18092738707 at 1731857376428Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731857376429 (+1 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707. 
at 1731857376430 (+1 ms)Flushing 2c52e0b374b469936fb4c18092738707/info: creating writer at 1731857376430Flushing 2c52e0b374b469936fb4c18092738707/info: appending metadata at 1731857376438 (+8 ms)Flushing 2c52e0b374b469936fb4c18092738707/info: closing flushed file at 1731857376438Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2a089e31: reopening flushed file at 1731857376462 (+24 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 2c52e0b374b469936fb4c18092738707 in 46ms, sequenceid=48, compaction requested=true at 1731857376475 (+13 ms)Writing region close event to WAL at 1731857376509 (+34 ms)Running coprocessor post-close hooks at 1731857376516 (+7 ms)Closed at 1731857376518 (+2 ms) 2024-11-17T15:29:36,519 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731857283864.2c52e0b374b469936fb4c18092738707. 2024-11-17T15:29:36,542 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/hbase/meta/1588230740/.tmp/table/99ce3010b5d640d9962b6e4856f43afe is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731857284327/Put/seqid=0 2024-11-17T15:29:36,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741853_1029 (size=5396) 2024-11-17T15:29:36,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741853_1029 (size=5396) 2024-11-17T15:29:36,548 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/hbase/meta/1588230740/.tmp/table/99ce3010b5d640d9962b6e4856f43afe 2024-11-17T15:29:36,556 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/hbase/meta/1588230740/.tmp/info/46455bd28d404eab8e3950d818de395d as hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/hbase/meta/1588230740/info/46455bd28d404eab8e3950d818de395d 2024-11-17T15:29:36,564 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/hbase/meta/1588230740/info/46455bd28d404eab8e3950d818de395d, entries=10, sequenceid=11, filesize=6.9 K 2024-11-17T15:29:36,565 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/hbase/meta/1588230740/.tmp/ns/0bb9e011af4948b6b063037bbd469b63 as hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/hbase/meta/1588230740/ns/0bb9e011af4948b6b063037bbd469b63 2024-11-17T15:29:36,572 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/hbase/meta/1588230740/ns/0bb9e011af4948b6b063037bbd469b63, entries=2, sequenceid=11, filesize=5.0 K 2024-11-17T15:29:36,574 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/hbase/meta/1588230740/.tmp/table/99ce3010b5d640d9962b6e4856f43afe as hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/hbase/meta/1588230740/table/99ce3010b5d640d9962b6e4856f43afe 2024-11-17T15:29:36,580 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/hbase/meta/1588230740/table/99ce3010b5d640d9962b6e4856f43afe, entries=2, sequenceid=11, filesize=5.3 K 2024-11-17T15:29:36,582 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 153ms, sequenceid=11, compaction requested=false 2024-11-17T15:29:36,587 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-17T15:29:36,588 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T15:29:36,588 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T15:29:36,588 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731857376428Running coprocessor pre-close hooks at 1731857376428Disabling compacts and flushes for region at 1731857376428Disabling writes for close at 1731857376429 (+1 ms)Obtaining lock to block concurrent updates at 1731857376429Preparing flush snapshotting stores in 1588230740 at 1731857376429Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731857376429Flushing stores of hbase:meta,,1.1588230740 at 1731857376430 (+1 ms)Flushing 1588230740/info: creating writer at 1731857376430Flushing 1588230740/info: appending metadata at 1731857376456 (+26 ms)Flushing 1588230740/info: closing flushed file at 1731857376456Flushing 1588230740/ns: creating writer at 1731857376484 (+28 ms)Flushing 1588230740/ns: appending metadata at 1731857376507 (+23 ms)Flushing 1588230740/ns: closing flushed file at 1731857376507Flushing 1588230740/table: creating writer at 1731857376525 (+18 ms)Flushing 1588230740/table: appending metadata at 1731857376541 (+16 ms)Flushing 1588230740/table: closing flushed file at 1731857376541Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@112acfe: reopening flushed file at 1731857376555 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7f778808: reopening flushed file at 1731857376564 (+9 
ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1e1a89c0: reopening flushed file at 1731857376573 (+9 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 153ms, sequenceid=11, compaction requested=false at 1731857376582 (+9 ms)Writing region close event to WAL at 1731857376583 (+1 ms)Running coprocessor post-close hooks at 1731857376588 (+5 ms)Closed at 1731857376588 2024-11-17T15:29:36,589 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-17T15:29:36,629 INFO [RS:0;7a780d55532c:33973 {}] regionserver.HRegionServer(976): stopping server 7a780d55532c,33973,1731857281708; all regions closed. 2024-11-17T15:29:36,631 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:36,631 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:36,631 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:36,631 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:36,631 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:36,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741834_1010 (size=3066) 2024-11-17T15:29:36,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741834_1010 (size=3066) 2024-11-17T15:29:36,638 DEBUG [RS:0;7a780d55532c:33973 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/oldWALs 2024-11-17T15:29:36,638 INFO [RS:0;7a780d55532c:33973 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7a780d55532c%2C33973%2C1731857281708.meta:.meta(num 1731857283495) 2024-11-17T15:29:36,639 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:36,639 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:36,639 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:36,639 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:36,640 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:36,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741847_1023 (size=12695) 2024-11-17T15:29:36,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741847_1023 (size=12695) 2024-11-17T15:29:36,647 DEBUG [RS:0;7a780d55532c:33973 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/oldWALs 2024-11-17T15:29:36,647 INFO [RS:0;7a780d55532c:33973 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7a780d55532c%2C33973%2C1731857281708:(num 1731857356363) 2024-11-17T15:29:36,647 DEBUG [RS:0;7a780d55532c:33973 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:29:36,647 INFO [RS:0;7a780d55532c:33973 {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T15:29:36,648 INFO [RS:0;7a780d55532c:33973 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T15:29:36,648 INFO [RS:0;7a780d55532c:33973 {}] hbase.ChoreService(370): Chore service for: regionserver/7a780d55532c:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore 
name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-17T15:29:36,648 INFO [RS:0;7a780d55532c:33973 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T15:29:36,648 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-17T15:29:36,648 INFO [RS:0;7a780d55532c:33973 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33973 2024-11-17T15:29:36,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34909-0x101268ad6a90000, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T15:29:36,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33973-0x101268ad6a90001, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7a780d55532c,33973,1731857281708 2024-11-17T15:29:36,652 INFO [RS:0;7a780d55532c:33973 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T15:29:36,654 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7a780d55532c,33973,1731857281708] 2024-11-17T15:29:36,656 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7a780d55532c,33973,1731857281708 already deleted, retry=false 2024-11-17T15:29:36,656 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7a780d55532c,33973,1731857281708 expired; onlineServers=0 2024-11-17T15:29:36,656 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7a780d55532c,34909,1731857280918' ***** 2024-11-17T15:29:36,656 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-17T15:29:36,656 INFO [M:0;7a780d55532c:34909 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T15:29:36,656 INFO [M:0;7a780d55532c:34909 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T15:29:36,656 DEBUG [M:0;7a780d55532c:34909 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-17T15:29:36,656 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
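
The ZooKeeper traffic in these records follows the usual ephemeral-node pattern: deleting /hbase/running announces cluster shutdown, and the disappearance of a region server's znode under /hbase/rs is what RegionServerTracker reports as expiration. A small stand-alone sketch with the plain ZooKeeper client shows how such watches are registered and fired (the quorum string and paths are taken from this log; this is not HBase's ZKWatcher code, and the session timeout is an arbitrary value):

```java
// Watching the znodes named in the records above (/hbase/running, /hbase/rs).
// Plain ZooKeeper client sketch; not HBase's ZKWatcher implementation.
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ShutdownZNodeWatcherSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:56154", 30_000, new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        // NodeDeleted on /hbase/running signals cluster shutdown; NodeChildrenChanged
        // on /hbase/rs is how the loss of a region server's ephemeral node is noticed.
        System.out.println(event.getType() + " " + event.getPath());
      }
    });
    zk.exists("/hbase/running", true);   // one-shot watch: fires NodeDeleted on shutdown
    zk.getChildren("/hbase/rs", true);   // one-shot watch: fires NodeChildrenChanged
    Thread.sleep(60_000);                // keep the session alive long enough to observe
    zk.close();
  }
}
```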
2024-11-17T15:29:36,656 DEBUG [M:0;7a780d55532c:34909 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-17T15:29:36,656 DEBUG [master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.large.0-1731857282762 {}] cleaner.HFileCleaner(306): Exit Thread[master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.large.0-1731857282762,5,FailOnTimeoutGroup] 2024-11-17T15:29:36,656 DEBUG [master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.small.0-1731857282765 {}] cleaner.HFileCleaner(306): Exit Thread[master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.small.0-1731857282765,5,FailOnTimeoutGroup] 2024-11-17T15:29:36,657 INFO [M:0;7a780d55532c:34909 {}] hbase.ChoreService(370): Chore service for: master/7a780d55532c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-17T15:29:36,657 INFO [M:0;7a780d55532c:34909 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T15:29:36,657 DEBUG [M:0;7a780d55532c:34909 {}] master.HMaster(1795): Stopping service threads 2024-11-17T15:29:36,657 INFO [M:0;7a780d55532c:34909 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-17T15:29:36,657 INFO [M:0;7a780d55532c:34909 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T15:29:36,657 INFO [M:0;7a780d55532c:34909 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-17T15:29:36,658 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-17T15:29:36,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34909-0x101268ad6a90000, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-17T15:29:36,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34909-0x101268ad6a90000, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:29:36,660 DEBUG [M:0;7a780d55532c:34909 {}] zookeeper.ZKUtil(347): master:34909-0x101268ad6a90000, quorum=127.0.0.1:56154, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-17T15:29:36,660 WARN [M:0;7a780d55532c:34909 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-17T15:29:36,661 INFO [M:0;7a780d55532c:34909 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/.lastflushedseqids 2024-11-17T15:29:36,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741854_1030 (size=130) 2024-11-17T15:29:36,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741854_1030 (size=130) 2024-11-17T15:29:36,673 INFO [M:0;7a780d55532c:34909 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-17T15:29:36,674 INFO [M:0;7a780d55532c:34909 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-17T15:29:36,674 DEBUG [M:0;7a780d55532c:34909 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T15:29:36,674 INFO [M:0;7a780d55532c:34909 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:29:36,674 DEBUG [M:0;7a780d55532c:34909 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:29:36,674 DEBUG [M:0;7a780d55532c:34909 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T15:29:36,674 DEBUG [M:0;7a780d55532c:34909 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:29:36,674 INFO [M:0;7a780d55532c:34909 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-17T15:29:36,691 DEBUG [M:0;7a780d55532c:34909 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d03be36284334c11ac33bc7cd2b217c4 is 82, key is hbase:meta,,1/info:regioninfo/1731857283567/Put/seqid=0 2024-11-17T15:29:36,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741855_1031 (size=5672) 2024-11-17T15:29:36,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741855_1031 (size=5672) 2024-11-17T15:29:36,698 INFO [M:0;7a780d55532c:34909 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d03be36284334c11ac33bc7cd2b217c4 2024-11-17T15:29:36,720 DEBUG [M:0;7a780d55532c:34909 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a536541685a143c5aa46dce5d18cbfdf is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731857284335/Put/seqid=0 2024-11-17T15:29:36,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741856_1032 (size=6247) 2024-11-17T15:29:36,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741856_1032 (size=6247) 2024-11-17T15:29:36,731 INFO [M:0;7a780d55532c:34909 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a536541685a143c5aa46dce5d18cbfdf 2024-11-17T15:29:36,738 INFO [M:0;7a780d55532c:34909 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a536541685a143c5aa46dce5d18cbfdf 2024-11-17T15:29:36,753 DEBUG [M:0;7a780d55532c:34909 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/027eef9f03624c53bab5ba011f7dbaba is 69, key is 7a780d55532c,33973,1731857281708/rs:state/1731857282802/Put/seqid=0 2024-11-17T15:29:36,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33973-0x101268ad6a90001, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T15:29:36,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33973-0x101268ad6a90001, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T15:29:36,755 INFO [RS:0;7a780d55532c:33973 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T15:29:36,755 INFO [RS:0;7a780d55532c:33973 {}] regionserver.HRegionServer(1031): Exiting; stopping=7a780d55532c,33973,1731857281708; zookeeper connection closed. 2024-11-17T15:29:36,756 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7e1edf28 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7e1edf28 2024-11-17T15:29:36,756 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-17T15:29:36,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741857_1033 (size=5156) 2024-11-17T15:29:36,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741857_1033 (size=5156) 2024-11-17T15:29:36,761 INFO [M:0;7a780d55532c:34909 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/027eef9f03624c53bab5ba011f7dbaba 2024-11-17T15:29:36,783 DEBUG [M:0;7a780d55532c:34909 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/52284be97c1243ab9d3bbfc592712dda is 52, key is load_balancer_on/state:d/1731857283831/Put/seqid=0 2024-11-17T15:29:36,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741858_1034 (size=5056) 2024-11-17T15:29:36,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741858_1034 (size=5056) 2024-11-17T15:29:36,792 INFO [M:0;7a780d55532c:34909 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/52284be97c1243ab9d3bbfc592712dda 2024-11-17T15:29:36,799 DEBUG [M:0;7a780d55532c:34909 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d03be36284334c11ac33bc7cd2b217c4 as 
hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d03be36284334c11ac33bc7cd2b217c4 2024-11-17T15:29:36,805 INFO [M:0;7a780d55532c:34909 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d03be36284334c11ac33bc7cd2b217c4, entries=8, sequenceid=59, filesize=5.5 K 2024-11-17T15:29:36,807 DEBUG [M:0;7a780d55532c:34909 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a536541685a143c5aa46dce5d18cbfdf as hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a536541685a143c5aa46dce5d18cbfdf 2024-11-17T15:29:36,813 INFO [M:0;7a780d55532c:34909 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a536541685a143c5aa46dce5d18cbfdf 2024-11-17T15:29:36,813 INFO [M:0;7a780d55532c:34909 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a536541685a143c5aa46dce5d18cbfdf, entries=6, sequenceid=59, filesize=6.1 K 2024-11-17T15:29:36,815 DEBUG [M:0;7a780d55532c:34909 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/027eef9f03624c53bab5ba011f7dbaba as hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/027eef9f03624c53bab5ba011f7dbaba 2024-11-17T15:29:36,823 INFO [M:0;7a780d55532c:34909 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/027eef9f03624c53bab5ba011f7dbaba, entries=1, sequenceid=59, filesize=5.0 K 2024-11-17T15:29:36,825 DEBUG [M:0;7a780d55532c:34909 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/52284be97c1243ab9d3bbfc592712dda as hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/52284be97c1243ab9d3bbfc592712dda 2024-11-17T15:29:36,833 INFO [M:0;7a780d55532c:34909 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/52284be97c1243ab9d3bbfc592712dda, entries=1, sequenceid=59, filesize=4.9 K 2024-11-17T15:29:36,835 INFO [M:0;7a780d55532c:34909 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 160ms, sequenceid=59, compaction requested=false 2024-11-17T15:29:36,837 INFO [M:0;7a780d55532c:34909 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
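
Every flush above follows the same two-step pattern: the memstore snapshot is written to a file under the store's .tmp directory, then HRegionFileSystem "commits" it by renaming it into the family directory (info/, proc/, rs/, state/). A hedged sketch of that write-then-rename idiom with plain Hadoop FileSystem calls (the method, class, and path layout below are illustrative; this is not the HRegionFileSystem code):

```java
// The write-to-.tmp-then-rename commit pattern visible in the
// "Committing .../.tmp/<file> as .../<family>/<file>" records above.
// Illustrative only; not HRegionFileSystem.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
  public static Path writeAndCommit(Configuration conf, Path storeDir, String fileName,
      byte[] payload) throws java.io.IOException {
    FileSystem fs = storeDir.getFileSystem(conf);
    Path tmp = new Path(new Path(storeDir, ".tmp"), fileName);
    try (FSDataOutputStream out = fs.create(tmp)) {
      out.write(payload);                 // readers never see a half-written file
    }
    Path committed = new Path(storeDir, fileName);
    if (!fs.rename(tmp, committed)) {     // publish the finished file in one rename
      throw new java.io.IOException("Commit failed for " + tmp);
    }
    return committed;
  }
}
```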
2024-11-17T15:29:36,837 DEBUG [M:0;7a780d55532c:34909 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731857376674Disabling compacts and flushes for region at 1731857376674Disabling writes for close at 1731857376674Obtaining lock to block concurrent updates at 1731857376674Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731857376674Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1731857376675 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731857376675Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731857376676 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731857376691 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731857376691Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731857376704 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731857376720 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731857376720Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731857376738 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731857376753 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731857376753Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731857376767 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731857376783 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731857376783Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4c786b25: reopening flushed file at 1731857376798 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@f3f3c49: reopening flushed file at 1731857376806 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@581a96c: reopening flushed file at 1731857376814 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7a743c2c: reopening flushed file at 1731857376824 (+10 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 160ms, sequenceid=59, compaction requested=false at 1731857376835 (+11 ms)Writing region close event to WAL at 1731857376836 (+1 ms)Closed at 1731857376836 2024-11-17T15:29:36,838 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:36,838 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:36,838 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:36,838 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:36,838 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:36,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741830_1006 (size=27973) 2024-11-17T15:29:36,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43077 is added to blk_1073741830_1006 (size=27973) 2024-11-17T15:29:36,842 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-17T15:29:36,842 INFO [M:0;7a780d55532c:34909 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-17T15:29:36,842 INFO [M:0;7a780d55532c:34909 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34909 2024-11-17T15:29:36,843 INFO [M:0;7a780d55532c:34909 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T15:29:36,888 INFO [regionserver/7a780d55532c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T15:29:36,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34909-0x101268ad6a90000, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T15:29:36,945 INFO [M:0;7a780d55532c:34909 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T15:29:36,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34909-0x101268ad6a90000, quorum=127.0.0.1:56154, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T15:29:36,950 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@55d18735{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:29:36,952 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@15370523{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T15:29:36,952 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T15:29:36,953 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@335a4f9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T15:29:36,953 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ae73635{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/hadoop.log.dir/,STOPPED} 2024-11-17T15:29:36,956 WARN [BP-245048582-172.17.0.2-1731857277960 heartbeating to localhost/127.0.0.1:39521 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T15:29:36,956 WARN [BP-245048582-172.17.0.2-1731857277960 heartbeating to localhost/127.0.0.1:39521 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-245048582-172.17.0.2-1731857277960 (Datanode Uuid 1b3c695c-4d41-404b-9454-8fdc542079f1) service to localhost/127.0.0.1:39521 2024-11-17T15:29:36,956 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T15:29:36,956 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T15:29:36,957 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/cluster_e684ad8a-3b6f-00db-fd58-5febbe409a88/data/data3/current/BP-245048582-172.17.0.2-1731857277960 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:29:36,957 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/cluster_e684ad8a-3b6f-00db-fd58-5febbe409a88/data/data4/current/BP-245048582-172.17.0.2-1731857277960 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:29:36,958 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T15:29:36,966 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@59e63bea{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:29:36,967 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@264a9341{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T15:29:36,967 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T15:29:36,967 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5bdc1e47{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T15:29:36,967 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2aa5bb6e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/hadoop.log.dir/,STOPPED} 2024-11-17T15:29:36,969 WARN [BP-245048582-172.17.0.2-1731857277960 heartbeating to localhost/127.0.0.1:39521 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T15:29:36,969 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
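
The remaining records wind down the second datanode, the HDFS web contexts, and the mini ZooKeeper cluster, and end with the test's ResourceChecker comparing thread counts before and after the test ("Thread=77 (was 12)") and dumping the stacks of potentially hanging threads. The same kind of before/after check can be sketched with plain JDK APIs (this is not the HBase ResourceChecker implementation; which threads to report is a choice made for the sketch):

```java
// Sketch of a before/after thread check like the ResourceChecker output that
// follows ("Thread=77 (was 12) Potentially hanging thread: ...").
// Plain JDK APIs; not the HBase ResourceChecker implementation.
import java.util.Map;

public class ThreadLeakCheckSketch {
  private int before;

  public void beforeTest() {
    before = Thread.activeCount();
  }

  public void afterTest() {
    Map<Thread, StackTraceElement[]> all = Thread.getAllStackTraces();
    System.out.println("Thread=" + all.size() + " (was " + before + ")");
    for (Map.Entry<Thread, StackTraceElement[]> e : all.entrySet()) {
      if (e.getKey().isDaemon()) {
        continue; // report only non-daemon threads in this sketch
      }
      System.out.println("Potentially hanging thread: " + e.getKey().getName());
      for (StackTraceElement frame : e.getValue()) {
        System.out.println("    " + frame);
      }
    }
  }
}
```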
2024-11-17T15:29:36,969 WARN [BP-245048582-172.17.0.2-1731857277960 heartbeating to localhost/127.0.0.1:39521 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-245048582-172.17.0.2-1731857277960 (Datanode Uuid 38e3011b-f23a-4452-a6cc-b599c74faa3c) service to localhost/127.0.0.1:39521 2024-11-17T15:29:36,969 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T15:29:36,969 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/cluster_e684ad8a-3b6f-00db-fd58-5febbe409a88/data/data1/current/BP-245048582-172.17.0.2-1731857277960 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:29:36,970 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/cluster_e684ad8a-3b6f-00db-fd58-5febbe409a88/data/data2/current/BP-245048582-172.17.0.2-1731857277960 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:29:36,970 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T15:29:36,981 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4c77270f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T15:29:36,982 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c5145e6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T15:29:36,982 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T15:29:36,982 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46a86f8c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T15:29:36,982 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f2ab976{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/hadoop.log.dir/,STOPPED} 2024-11-17T15:29:36,992 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-17T15:29:37,023 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-17T15:29:37,033 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39521 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:39521 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39521 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: master/7a780d55532c:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:39521 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: HBase-Metrics2-1 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39521 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-2 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:39521 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:39521 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@c3327a java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/7a780d55532c:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39521 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: regionserver/7a780d55532c:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=404 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=187 (was 258), ProcessCount=11 (was 11), AvailableMemoryMB=3809 (was 4053) 2024-11-17T15:29:37,041 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=404, MaxFileDescriptor=1048576, SystemLoadAverage=187, ProcessCount=11, AvailableMemoryMB=3802 2024-11-17T15:29:37,041 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-17T15:29:37,041 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/hadoop.log.dir so I do NOT create it in target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89 2024-11-17T15:29:37,042 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f17f4686-1ff9-2c9f-5bb8-d4400c5cfaf4/hadoop.tmp.dir so I do NOT create it in target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89 2024-11-17T15:29:37,042 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/cluster_38d30a54-7e15-888c-9b9b-2d7e57dc3145, deleteOnExit=true 2024-11-17T15:29:37,042 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-17T15:29:37,042 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/test.cache.data in system properties and HBase conf 2024-11-17T15:29:37,042 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/hadoop.tmp.dir in system properties and HBase conf 2024-11-17T15:29:37,042 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/hadoop.log.dir in system properties and HBase conf 2024-11-17T15:29:37,042 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-17T15:29:37,042 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-17T15:29:37,042 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-17T15:29:37,043 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-17T15:29:37,043 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-17T15:29:37,043 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-17T15:29:37,043 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-17T15:29:37,043 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T15:29:37,043 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-17T15:29:37,043 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-17T15:29:37,043 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T15:29:37,043 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T15:29:37,043 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-17T15:29:37,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/nfs.dump.dir in system properties and HBase conf 2024-11-17T15:29:37,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/java.io.tmpdir in system properties and HBase conf 2024-11-17T15:29:37,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T15:29:37,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-17T15:29:37,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-17T15:29:37,059 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T15:29:37,148 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:29:37,157 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T15:29:37,158 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T15:29:37,158 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T15:29:37,159 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T15:29:37,159 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:29:37,160 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a54e173{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/hadoop.log.dir/,AVAILABLE} 2024-11-17T15:29:37,160 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7a8c06e3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T15:29:37,279 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@53521f59{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/java.io.tmpdir/jetty-localhost-33643-hadoop-hdfs-3_4_1-tests_jar-_-any-5402989951231586134/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T15:29:37,280 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2fbdac8b{HTTP/1.1, (http/1.1)}{localhost:33643} 2024-11-17T15:29:37,280 INFO [Time-limited test {}] server.Server(415): Started @101202ms 2024-11-17T15:29:37,295 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T15:29:37,383 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:29:37,387 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T15:29:37,388 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T15:29:37,388 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T15:29:37,388 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T15:29:37,389 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4de0900d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/hadoop.log.dir/,AVAILABLE} 2024-11-17T15:29:37,389 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@39f44d3e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T15:29:37,506 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5b15f3df{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/java.io.tmpdir/jetty-localhost-34013-hadoop-hdfs-3_4_1-tests_jar-_-any-6431058630145665232/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:29:37,506 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@72a59bd{HTTP/1.1, (http/1.1)}{localhost:34013} 2024-11-17T15:29:37,506 INFO [Time-limited test {}] server.Server(415): Started @101428ms 2024-11-17T15:29:37,508 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T15:29:37,543 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:29:37,547 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T15:29:37,548 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T15:29:37,548 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T15:29:37,548 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T15:29:37,548 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@113235ce{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/hadoop.log.dir/,AVAILABLE} 2024-11-17T15:29:37,549 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4e477227{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T15:29:37,623 WARN [Thread-437 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/cluster_38d30a54-7e15-888c-9b9b-2d7e57dc3145/data/data1/current/BP-417743486-172.17.0.2-1731857377077/current, will proceed with Du for space computation calculation, 2024-11-17T15:29:37,624 WARN [Thread-438 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/cluster_38d30a54-7e15-888c-9b9b-2d7e57dc3145/data/data2/current/BP-417743486-172.17.0.2-1731857377077/current, will proceed with Du for space computation calculation, 2024-11-17T15:29:37,648 WARN [Thread-416 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T15:29:37,652 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa35078dfb0383462 with lease ID 0x957e92febc1a861b: Processing first storage report for DS-1b5f8df8-9276-4ae7-8b08-4782469f2ad1 from datanode DatanodeRegistration(127.0.0.1:42953, datanodeUuid=a0305420-1d88-496f-b2e3-36dd0f2419d2, infoPort=36219, infoSecurePort=0, ipcPort=45549, storageInfo=lv=-57;cid=testClusterID;nsid=164695608;c=1731857377077) 2024-11-17T15:29:37,652 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa35078dfb0383462 with lease ID 0x957e92febc1a861b: from storage DS-1b5f8df8-9276-4ae7-8b08-4782469f2ad1 node DatanodeRegistration(127.0.0.1:42953, datanodeUuid=a0305420-1d88-496f-b2e3-36dd0f2419d2, infoPort=36219, infoSecurePort=0, ipcPort=45549, storageInfo=lv=-57;cid=testClusterID;nsid=164695608;c=1731857377077), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-17T15:29:37,652 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa35078dfb0383462 with lease ID 0x957e92febc1a861b: Processing first storage report for DS-ab05e53b-0ba3-4084-afe0-f289c6fffcf7 from datanode DatanodeRegistration(127.0.0.1:42953, datanodeUuid=a0305420-1d88-496f-b2e3-36dd0f2419d2, infoPort=36219, infoSecurePort=0, ipcPort=45549, storageInfo=lv=-57;cid=testClusterID;nsid=164695608;c=1731857377077) 2024-11-17T15:29:37,652 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa35078dfb0383462 with lease ID 0x957e92febc1a861b: from storage DS-ab05e53b-0ba3-4084-afe0-f289c6fffcf7 node DatanodeRegistration(127.0.0.1:42953, datanodeUuid=a0305420-1d88-496f-b2e3-36dd0f2419d2, infoPort=36219, infoSecurePort=0, ipcPort=45549, storageInfo=lv=-57;cid=testClusterID;nsid=164695608;c=1731857377077), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:29:37,673 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@9acc548{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/java.io.tmpdir/jetty-localhost-45765-hadoop-hdfs-3_4_1-tests_jar-_-any-12876442159659198029/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:29:37,674 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3509b1e6{HTTP/1.1, (http/1.1)}{localhost:45765} 2024-11-17T15:29:37,674 INFO [Time-limited test {}] server.Server(415): Started @101596ms 2024-11-17T15:29:37,676 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
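
The entries up to this point record the test harness rewriting HDFS-related paths (dfs.*, nfs.dump.dir, fs.s3a.*, java.io.tmpdir) to sit under the target/test-data directory and then bringing up an in-process NameNode and DataNodes, each with its own Jetty web UI, followed by the first block reports. As a point of reference only, a minimal sketch of the JUnit setup that typically produces this startup sequence, assuming the HBaseTestingUtil API from the hbase-server test jar (startMiniCluster/shutdownMiniCluster are the usual entry points; they are not quoted from this log):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;

    public class ExampleMiniClusterIT {
      // Shared harness; it redirects dfs.*, nfs.*, fs.s3a.* and java.io.tmpdir
      // under target/test-data, as the HBaseTestingUtil lines above show.
      private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

      @BeforeClass
      public static void setUp() throws Exception {
        // Brings up mini HDFS, mini ZooKeeper, an HMaster and one RegionServer.
        UTIL.startMiniCluster();
      }

      @AfterClass
      public static void tearDown() throws Exception {
        UTIL.shutdownMiniCluster();
      }
    }
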
2024-11-17T15:29:37,778 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/cluster_38d30a54-7e15-888c-9b9b-2d7e57dc3145/data/data3/current/BP-417743486-172.17.0.2-1731857377077/current, will proceed with Du for space computation calculation, 2024-11-17T15:29:37,779 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/cluster_38d30a54-7e15-888c-9b9b-2d7e57dc3145/data/data4/current/BP-417743486-172.17.0.2-1731857377077/current, will proceed with Du for space computation calculation, 2024-11-17T15:29:37,797 WARN [Thread-452 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-17T15:29:37,800 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfce3c85f97cf73f7 with lease ID 0x957e92febc1a861c: Processing first storage report for DS-3eab1c22-26cd-45fb-af3b-f3f6510a84f5 from datanode DatanodeRegistration(127.0.0.1:37601, datanodeUuid=94407f10-db15-430f-9b07-a7fefda7508a, infoPort=36233, infoSecurePort=0, ipcPort=37465, storageInfo=lv=-57;cid=testClusterID;nsid=164695608;c=1731857377077) 2024-11-17T15:29:37,800 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfce3c85f97cf73f7 with lease ID 0x957e92febc1a861c: from storage DS-3eab1c22-26cd-45fb-af3b-f3f6510a84f5 node DatanodeRegistration(127.0.0.1:37601, datanodeUuid=94407f10-db15-430f-9b07-a7fefda7508a, infoPort=36233, infoSecurePort=0, ipcPort=37465, storageInfo=lv=-57;cid=testClusterID;nsid=164695608;c=1731857377077), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:29:37,800 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfce3c85f97cf73f7 with lease ID 0x957e92febc1a861c: Processing first storage report for DS-955e27bf-1cfb-4ad2-9571-06d395d2dccf from datanode DatanodeRegistration(127.0.0.1:37601, datanodeUuid=94407f10-db15-430f-9b07-a7fefda7508a, infoPort=36233, infoSecurePort=0, ipcPort=37465, storageInfo=lv=-57;cid=testClusterID;nsid=164695608;c=1731857377077) 2024-11-17T15:29:37,800 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfce3c85f97cf73f7 with lease ID 0x957e92febc1a861c: from storage DS-955e27bf-1cfb-4ad2-9571-06d395d2dccf node DatanodeRegistration(127.0.0.1:37601, datanodeUuid=94407f10-db15-430f-9b07-a7fefda7508a, infoPort=36233, infoSecurePort=0, ipcPort=37465, storageInfo=lv=-57;cid=testClusterID;nsid=164695608;c=1731857377077), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:29:37,806 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89 2024-11-17T15:29:37,808 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/cluster_38d30a54-7e15-888c-9b9b-2d7e57dc3145/zookeeper_0, clientPort=61433, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/cluster_38d30a54-7e15-888c-9b9b-2d7e57dc3145/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/cluster_38d30a54-7e15-888c-9b9b-2d7e57dc3145/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-17T15:29:37,809 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=61433 2024-11-17T15:29:37,809 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:29:37,811 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:29:37,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37601 is added to blk_1073741825_1001 (size=7) 2024-11-17T15:29:37,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42953 is added to blk_1073741825_1001 (size=7) 2024-11-17T15:29:37,823 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361 with version=8 2024-11-17T15:29:37,823 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/hbase-staging 2024-11-17T15:29:37,826 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7a780d55532c:0 server-side Connection retries=45 2024-11-17T15:29:37,826 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T15:29:37,826 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T15:29:37,826 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T15:29:37,826 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T15:29:37,826 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T15:29:37,826 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-17T15:29:37,826 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T15:29:37,827 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37363 2024-11-17T15:29:37,828 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37363 connecting to ZooKeeper ensemble=127.0.0.1:61433 2024-11-17T15:29:37,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:373630x0, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T15:29:37,836 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37363-0x101268c549f0000 connected 2024-11-17T15:29:37,854 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:29:37,856 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:29:37,858 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37363-0x101268c549f0000, quorum=127.0.0.1:61433, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T15:29:37,859 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361, hbase.cluster.distributed=false 2024-11-17T15:29:37,860 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37363-0x101268c549f0000, quorum=127.0.0.1:61433, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T15:29:37,861 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37363 2024-11-17T15:29:37,861 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37363 2024-11-17T15:29:37,861 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37363 2024-11-17T15:29:37,862 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37363 2024-11-17T15:29:37,862 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37363 2024-11-17T15:29:37,879 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7a780d55532c:0 server-side Connection retries=45 2024-11-17T15:29:37,879 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T15:29:37,880 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T15:29:37,880 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T15:29:37,880 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T15:29:37,880 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T15:29:37,880 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-17T15:29:37,880 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T15:29:37,881 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35033 2024-11-17T15:29:37,882 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35033 connecting to ZooKeeper ensemble=127.0.0.1:61433 2024-11-17T15:29:37,883 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:29:37,885 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:29:37,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:350330x0, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T15:29:37,891 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:350330x0, quorum=127.0.0.1:61433, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T15:29:37,891 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35033-0x101268c549f0001 connected 2024-11-17T15:29:37,891 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-17T15:29:37,894 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-17T15:29:37,895 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35033-0x101268c549f0001, quorum=127.0.0.1:61433, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-17T15:29:37,896 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35033-0x101268c549f0001, quorum=127.0.0.1:61433, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T15:29:37,897 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35033 2024-11-17T15:29:37,897 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35033 2024-11-17T15:29:37,899 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35033 2024-11-17T15:29:37,900 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35033 2024-11-17T15:29:37,904 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35033 2024-11-17T15:29:37,917 
DEBUG [M:0;7a780d55532c:37363 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7a780d55532c:37363 2024-11-17T15:29:37,918 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7a780d55532c,37363,1731857377825 2024-11-17T15:29:37,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35033-0x101268c549f0001, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T15:29:37,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37363-0x101268c549f0000, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T15:29:37,921 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37363-0x101268c549f0000, quorum=127.0.0.1:61433, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7a780d55532c,37363,1731857377825 2024-11-17T15:29:37,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35033-0x101268c549f0001, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-17T15:29:37,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37363-0x101268c549f0000, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:29:37,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35033-0x101268c549f0001, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:29:37,925 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37363-0x101268c549f0000, quorum=127.0.0.1:61433, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-17T15:29:37,926 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7a780d55532c,37363,1731857377825 from backup master directory 2024-11-17T15:29:37,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35033-0x101268c549f0001, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T15:29:37,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37363-0x101268c549f0000, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7a780d55532c,37363,1731857377825 2024-11-17T15:29:37,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37363-0x101268c549f0000, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T15:29:37,928 WARN [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-17T15:29:37,928 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7a780d55532c,37363,1731857377825 2024-11-17T15:29:37,935 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/hbase.id] with ID: 4cd2834f-a72a-4c19-8ef1-df5a9e6b7950 2024-11-17T15:29:37,935 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/.tmp/hbase.id 2024-11-17T15:29:37,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42953 is added to blk_1073741826_1002 (size=42) 2024-11-17T15:29:37,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37601 is added to blk_1073741826_1002 (size=42) 2024-11-17T15:29:37,946 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/.tmp/hbase.id]:[hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/hbase.id] 2024-11-17T15:29:37,961 INFO [master/7a780d55532c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:29:37,961 INFO [master/7a780d55532c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-17T15:29:37,963 INFO [master/7a780d55532c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
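
The ZKWatcher events above trace active-master election against the /hbase base znode: the master first registers under /hbase/backup-masters, then creates /hbase/master, deletes its backup entry, and writes the cluster ID file. A hedged sketch of inspecting those znodes directly with the plain ZooKeeper client (the ensemble address 127.0.0.1:61433 and the znode names are taken from the log; the getChildren/getData calls are standard ZooKeeper API, and the payload is protobuf-encoded rather than plain text):

    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;

    public class InspectHBaseZNodes {
      public static void main(String[] args) throws Exception {
        // Ensemble and base znode as reported in the log above.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:61433", 30000, event -> { });
        try {
          // Children of /hbase include master, backup-masters, running, acl, ...
          List<String> children = zk.getChildren("/hbase", false);
          System.out.println("/hbase children: " + children);
          // /hbase/master holds the active master's ServerName (protobuf-encoded).
          byte[] master = zk.getData("/hbase/master", false, null);
          System.out.println("/hbase/master payload bytes: " + master.length);
        } finally {
          zk.close();
        }
      }
    }
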
2024-11-17T15:29:37,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37363-0x101268c549f0000, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:29:37,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35033-0x101268c549f0001, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:29:37,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42953 is added to blk_1073741827_1003 (size=196) 2024-11-17T15:29:37,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37601 is added to blk_1073741827_1003 (size=196) 2024-11-17T15:29:37,979 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T15:29:37,981 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-17T15:29:37,981 INFO [master/7a780d55532c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T15:29:37,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42953 is added to blk_1073741828_1004 (size=1189) 2024-11-17T15:29:37,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37601 is added to blk_1073741828_1004 (size=1189) 2024-11-17T15:29:37,994 INFO [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/MasterData/data/master/store 2024-11-17T15:29:38,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37601 is added to blk_1073741829_1005 (size=34) 2024-11-17T15:29:38,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42953 is added to blk_1073741829_1005 (size=34) 2024-11-17T15:29:38,003 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:29:38,003 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T15:29:38,003 INFO [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:29:38,003 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:29:38,003 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T15:29:38,003 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:29:38,003 INFO [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
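
The descriptor dumped above for the local master:store region lists four column families (info, proc, rs, state) with per-family tuning such as ROW_INDEX_V1 block encoding, a ROWCOL bloom filter, in-memory caching and an 8 KB block size for 'info'. For comparison, a sketch of expressing an equivalent family with the public client builders (TableDescriptorBuilder / ColumnFamilyDescriptorBuilder from hbase-client; the table name here is illustrative, not the internal master:store table):

    import java.util.Arrays;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreLikeDescriptor {
      public static TableDescriptor build() {
        // Mirrors the 'info' family settings printed in the log:
        // VERSIONS=3, ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8192)
            .build();
        // 'proc', 'rs' and 'state' use the defaults shown (1 version, 64 KB blocks).
        ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder.of("proc");
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example_store_like"))  // illustrative name
            .setColumnFamilies(Arrays.asList(info, proc))
            .build();
      }
    }
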
2024-11-17T15:29:38,004 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731857378003Disabling compacts and flushes for region at 1731857378003Disabling writes for close at 1731857378003Writing region close event to WAL at 1731857378003Closed at 1731857378003 2024-11-17T15:29:38,005 WARN [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/MasterData/data/master/store/.initializing 2024-11-17T15:29:38,005 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/MasterData/WALs/7a780d55532c,37363,1731857377825 2024-11-17T15:29:38,008 INFO [master/7a780d55532c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a780d55532c%2C37363%2C1731857377825, suffix=, logDir=hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/MasterData/WALs/7a780d55532c,37363,1731857377825, archiveDir=hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/MasterData/oldWALs, maxLogs=10 2024-11-17T15:29:38,008 INFO [master/7a780d55532c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C37363%2C1731857377825.1731857378008 2024-11-17T15:29:38,016 INFO [master/7a780d55532c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/MasterData/WALs/7a780d55532c,37363,1731857377825/7a780d55532c%2C37363%2C1731857377825.1731857378008 2024-11-17T15:29:38,019 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36219:36219),(127.0.0.1/127.0.0.1:36233:36233)] 2024-11-17T15:29:38,022 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-17T15:29:38,022 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:29:38,022 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:29:38,022 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:29:38,024 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:29:38,026 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-17T15:29:38,026 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:29:38,026 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:29:38,027 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:29:38,028 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-17T15:29:38,028 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:29:38,029 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T15:29:38,029 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:29:38,031 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-17T15:29:38,031 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:29:38,032 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T15:29:38,032 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:29:38,033 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-17T15:29:38,033 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:29:38,034 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T15:29:38,034 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:29:38,035 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:29:38,035 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:29:38,037 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:29:38,037 DEBUG [master/7a780d55532c:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:29:38,038 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-17T15:29:38,039 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:29:38,042 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T15:29:38,042 INFO [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=880639, jitterRate=0.11979083716869354}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-17T15:29:38,044 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731857378022Initializing all the Stores at 1731857378024 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857378024Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857378024Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857378024Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857378024Cleaning up temporary data from old regions at 1731857378037 (+13 ms)Region opened successfully at 1731857378043 (+6 ms) 2024-11-17T15:29:38,045 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-17T15:29:38,050 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4aa59991, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7a780d55532c/172.17.0.2:0 2024-11-17T15:29:38,051 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-17T15:29:38,051 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-17T15:29:38,051 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-17T15:29:38,051 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-17T15:29:38,052 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-17T15:29:38,052 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-17T15:29:38,052 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-17T15:29:38,055 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-17T15:29:38,056 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37363-0x101268c549f0000, quorum=127.0.0.1:61433, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-17T15:29:38,057 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-17T15:29:38,058 INFO [master/7a780d55532c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-17T15:29:38,058 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37363-0x101268c549f0000, quorum=127.0.0.1:61433, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-17T15:29:38,061 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-17T15:29:38,061 INFO [master/7a780d55532c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-17T15:29:38,062 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37363-0x101268c549f0000, quorum=127.0.0.1:61433, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-17T15:29:38,064 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-17T15:29:38,065 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37363-0x101268c549f0000, quorum=127.0.0.1:61433, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-17T15:29:38,066 DEBUG 
[master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-17T15:29:38,071 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37363-0x101268c549f0000, quorum=127.0.0.1:61433, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-17T15:29:38,073 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-17T15:29:38,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35033-0x101268c549f0001, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T15:29:38,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37363-0x101268c549f0000, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T15:29:38,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35033-0x101268c549f0001, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:29:38,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37363-0x101268c549f0000, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:29:38,077 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7a780d55532c,37363,1731857377825, sessionid=0x101268c549f0000, setting cluster-up flag (Was=false) 2024-11-17T15:29:38,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35033-0x101268c549f0001, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:29:38,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37363-0x101268c549f0000, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:29:38,088 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-17T15:29:38,089 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7a780d55532c,37363,1731857377825 2024-11-17T15:29:38,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35033-0x101268c549f0001, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:29:38,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37363-0x101268c549f0000, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:29:38,100 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-17T15:29:38,101 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7a780d55532c,37363,1731857377825 2024-11-17T15:29:38,102 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-17T15:29:38,104 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-17T15:29:38,105 INFO [master/7a780d55532c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-17T15:29:38,105 INFO [master/7a780d55532c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-17T15:29:38,105 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7a780d55532c,37363,1731857377825 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-17T15:29:38,106 INFO [RS:0;7a780d55532c:35033 {}] regionserver.HRegionServer(746): ClusterId : 4cd2834f-a72a-4c19-8ef1-df5a9e6b7950 2024-11-17T15:29:38,106 DEBUG [RS:0;7a780d55532c:35033 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-17T15:29:38,107 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7a780d55532c:0, corePoolSize=5, maxPoolSize=5 2024-11-17T15:29:38,107 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7a780d55532c:0, corePoolSize=5, maxPoolSize=5 2024-11-17T15:29:38,107 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7a780d55532c:0, corePoolSize=5, maxPoolSize=5 2024-11-17T15:29:38,107 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7a780d55532c:0, corePoolSize=5, maxPoolSize=5 2024-11-17T15:29:38,107 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7a780d55532c:0, corePoolSize=10, maxPoolSize=10 2024-11-17T15:29:38,107 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:38,107 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service 
name=MASTER_MERGE_OPERATIONS-master/7a780d55532c:0, corePoolSize=2, maxPoolSize=2 2024-11-17T15:29:38,107 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:38,108 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731857408108 2024-11-17T15:29:38,109 DEBUG [RS:0;7a780d55532c:35033 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-17T15:29:38,109 DEBUG [RS:0;7a780d55532c:35033 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-17T15:29:38,109 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-17T15:29:38,109 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-17T15:29:38,109 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-17T15:29:38,109 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-17T15:29:38,109 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-17T15:29:38,109 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-17T15:29:38,109 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
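The repeated "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." entries above come from HBase's ChoreService. Below is a minimal sketch of that pattern, under stated assumptions: ChoreService, ScheduledChore and Stoppable are server-internal classes shown only to illustrate the mechanism, and the chore name "MyCleanupChore", the 600000 ms period and the ChoreSketch/CleanupChore class names are made up for the sketch.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      // Hypothetical chore: runs every 600000 ms, matching the LogsCleaner period above.
      static class CleanupChore extends ScheduledChore {
        CleanupChore(Stoppable stopper) {
          super("MyCleanupChore", stopper, 600000);
        }
        @Override
        protected void chore() {
          System.out.println("periodic cleanup work would go here");
        }
      }

      public static void main(String[] args) {
        // Trivial stopper implementation, sufficient for the sketch.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("sketch");   // thread-name prefix
        service.scheduleChore(new CleanupChore(stopper));    // produces an "... is enabled." log line
        // On shutdown the service would be stopped with service.shutdown().
      }
    }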
2024-11-17T15:29:38,110 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-17T15:29:38,110 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-17T15:29:38,110 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-17T15:29:38,110 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T15:29:38,110 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-17T15:29:38,110 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-17T15:29:38,110 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-17T15:29:38,111 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.large.0-1731857378110,5,FailOnTimeoutGroup] 2024-11-17T15:29:38,111 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.small.0-1731857378111,5,FailOnTimeoutGroup] 2024-11-17T15:29:38,111 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:38,111 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-17T15:29:38,111 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:38,111 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
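The master notes above that reopening regions with a very high storeFileRefCount stays disabled unless hbase.regions.recovery.store.file.ref.count is set to a value greater than 0. A hedged sketch of enabling it programmatically follows; the threshold 128 and the class name RefCountConfigSketch are arbitrary examples, and in a real deployment the property would normally live in hbase-site.xml on the master.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RefCountConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Property name taken from the log line above; 128 is an illustrative threshold.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 128);
      }
    }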
2024-11-17T15:29:38,112 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:29:38,112 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-17T15:29:38,113 DEBUG [RS:0;7a780d55532c:35033 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-17T15:29:38,113 DEBUG [RS:0;7a780d55532c:35033 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17c3ca93, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7a780d55532c/172.17.0.2:0 2024-11-17T15:29:38,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37601 is added to blk_1073741831_1007 (size=1321) 2024-11-17T15:29:38,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42953 is added to blk_1073741831_1007 (size=1321) 2024-11-17T15:29:38,123 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-17T15:29:38,124 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', 
IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361 2024-11-17T15:29:38,129 DEBUG [RS:0;7a780d55532c:35033 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7a780d55532c:35033 2024-11-17T15:29:38,129 INFO [RS:0;7a780d55532c:35033 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-17T15:29:38,129 INFO [RS:0;7a780d55532c:35033 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-17T15:29:38,129 DEBUG [RS:0;7a780d55532c:35033 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-17T15:29:38,130 INFO [RS:0;7a780d55532c:35033 {}] regionserver.HRegionServer(2659): reportForDuty to master=7a780d55532c,37363,1731857377825 with port=35033, startcode=1731857377879 2024-11-17T15:29:38,130 DEBUG [RS:0;7a780d55532c:35033 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-17T15:29:38,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37601 is added to blk_1073741832_1008 (size=32) 2024-11-17T15:29:38,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42953 is added to blk_1073741832_1008 (size=32) 2024-11-17T15:29:38,133 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:29:38,137 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T15:29:38,138 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34997, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-17T15:29:38,138 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37363 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7a780d55532c,35033,1731857377879 2024-11-17T15:29:38,139 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37363 {}] 
master.ServerManager(517): Registering regionserver=7a780d55532c,35033,1731857377879 2024-11-17T15:29:38,139 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T15:29:38,139 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:29:38,140 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:29:38,140 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T15:29:38,141 DEBUG [RS:0;7a780d55532c:35033 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361 2024-11-17T15:29:38,141 DEBUG [RS:0;7a780d55532c:35033 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45951 2024-11-17T15:29:38,141 DEBUG [RS:0;7a780d55532c:35033 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-17T15:29:38,143 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T15:29:38,143 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:29:38,143 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:29:38,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37363-0x101268c549f0000, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T15:29:38,144 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T15:29:38,144 DEBUG [RS:0;7a780d55532c:35033 {}] zookeeper.ZKUtil(111): regionserver:35033-0x101268c549f0001, quorum=127.0.0.1:61433, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7a780d55532c,35033,1731857377879 2024-11-17T15:29:38,144 WARN [RS:0;7a780d55532c:35033 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-17T15:29:38,145 INFO [RS:0;7a780d55532c:35033 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T15:29:38,145 DEBUG [RS:0;7a780d55532c:35033 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/WALs/7a780d55532c,35033,1731857377879 2024-11-17T15:29:38,145 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7a780d55532c,35033,1731857377879] 2024-11-17T15:29:38,145 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T15:29:38,146 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:29:38,146 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:29:38,146 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T15:29:38,148 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction 
window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T15:29:38,148 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:29:38,149 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:29:38,149 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T15:29:38,150 INFO [RS:0;7a780d55532c:35033 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-17T15:29:38,150 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/data/hbase/meta/1588230740 2024-11-17T15:29:38,150 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/data/hbase/meta/1588230740 2024-11-17T15:29:38,152 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T15:29:38,152 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T15:29:38,153 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-17T15:29:38,154 INFO [RS:0;7a780d55532c:35033 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-17T15:29:38,154 INFO [RS:0;7a780d55532c:35033 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-17T15:29:38,154 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T15:29:38,154 INFO [RS:0;7a780d55532c:35033 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:38,155 INFO [RS:0;7a780d55532c:35033 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-17T15:29:38,156 INFO [RS:0;7a780d55532c:35033 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-17T15:29:38,156 INFO [RS:0;7a780d55532c:35033 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
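The MemStoreFlusher and FlushLargeStoresPolicy lines above (globalMemStoreLimit/LowMark, and the fallback to the region memstore flush size divided by the number of families when no per-family lower bound is set on the table) are governed by a handful of settings. The sketch below names the usual keys under stated assumptions: the values are illustrative defaults rather than numbers taken from this run, and MemStoreConfigSketch is a made-up class name.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Region-level flush threshold in bytes; FlushLargeStoresPolicy divides this
        // by the number of families when the per-family lower bound is unset, as logged.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        // Fraction of RegionServer heap usable by all memstores; the logged
        // globalMemStoreLimit and its low-water mark derive from these two settings.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
      }
    }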
2024-11-17T15:29:38,157 DEBUG [RS:0;7a780d55532c:35033 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:38,157 DEBUG [RS:0;7a780d55532c:35033 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:38,157 DEBUG [RS:0;7a780d55532c:35033 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:38,157 DEBUG [RS:0;7a780d55532c:35033 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:38,157 DEBUG [RS:0;7a780d55532c:35033 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:38,157 DEBUG [RS:0;7a780d55532c:35033 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7a780d55532c:0, corePoolSize=2, maxPoolSize=2 2024-11-17T15:29:38,157 DEBUG [RS:0;7a780d55532c:35033 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:38,157 DEBUG [RS:0;7a780d55532c:35033 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:38,158 DEBUG [RS:0;7a780d55532c:35033 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:38,158 DEBUG [RS:0;7a780d55532c:35033 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:38,158 DEBUG [RS:0;7a780d55532c:35033 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:38,158 DEBUG [RS:0;7a780d55532c:35033 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:38,158 DEBUG [RS:0;7a780d55532c:35033 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7a780d55532c:0, corePoolSize=3, maxPoolSize=3 2024-11-17T15:29:38,158 DEBUG [RS:0;7a780d55532c:35033 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0, corePoolSize=3, maxPoolSize=3 2024-11-17T15:29:38,159 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T15:29:38,159 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=802613, jitterRate=0.020576462149620056}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T15:29:38,161 INFO [RS:0;7a780d55532c:35033 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is 
enabled. 2024-11-17T15:29:38,161 INFO [RS:0;7a780d55532c:35033 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:38,161 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731857378133Initializing all the Stores at 1731857378134 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857378134Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857378136 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857378136Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857378136Cleaning up temporary data from old regions at 1731857378152 (+16 ms)Region opened successfully at 1731857378161 (+9 ms) 2024-11-17T15:29:38,161 INFO [RS:0;7a780d55532c:35033 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:38,161 INFO [RS:0;7a780d55532c:35033 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:38,161 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T15:29:38,161 INFO [RS:0;7a780d55532c:35033 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:38,161 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T15:29:38,161 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T15:29:38,161 INFO [RS:0;7a780d55532c:35033 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,35033,1731857377879-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
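The region open journal above restates the hbase:meta column-family attributes (VERSIONS 3, BLOOMFILTER ROWCOL, IN_MEMORY true, ROW_INDEX_V1 encoding, 8 KB blocks, TTL FOREVER). hbase:meta itself is created internally by InitMetaProcedure, so the following is only an analogy: a hedged sketch of how a user table with comparable family settings could be declared through the public client API. The table name "demo", the family "info" and the class name DescriptorSketch are assumptions for the sketch.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Column family mirroring the attributes logged for hbase:meta's 'info' family.
          ColumnFamilyDescriptorBuilder cf = ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(3)
              .setBloomFilterType(BloomType.ROWCOL)
              .setInMemory(true)
              .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
              .setBlocksize(8192)
              .setTimeToLive(HConstants.FOREVER);
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("demo"))   // hypothetical table name
              .setColumnFamily(cf.build())
              .build());
        }
      }
    }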
2024-11-17T15:29:38,161 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T15:29:38,161 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T15:29:38,167 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T15:29:38,167 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731857378161Disabling compacts and flushes for region at 1731857378161Disabling writes for close at 1731857378161Writing region close event to WAL at 1731857378167 (+6 ms)Closed at 1731857378167 2024-11-17T15:29:38,169 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T15:29:38,169 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-17T15:29:38,169 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-17T15:29:38,171 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T15:29:38,173 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-17T15:29:38,185 INFO [RS:0;7a780d55532c:35033 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-17T15:29:38,185 INFO [RS:0;7a780d55532c:35033 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,35033,1731857377879-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:38,185 INFO [RS:0;7a780d55532c:35033 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:38,185 INFO [RS:0;7a780d55532c:35033 {}] regionserver.Replication(171): 7a780d55532c,35033,1731857377879 started 2024-11-17T15:29:38,204 INFO [RS:0;7a780d55532c:35033 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-17T15:29:38,204 INFO [RS:0;7a780d55532c:35033 {}] regionserver.HRegionServer(1482): Serving as 7a780d55532c,35033,1731857377879, RpcServer on 7a780d55532c/172.17.0.2:35033, sessionid=0x101268c549f0001 2024-11-17T15:29:38,204 DEBUG [RS:0;7a780d55532c:35033 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-17T15:29:38,204 DEBUG [RS:0;7a780d55532c:35033 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7a780d55532c,35033,1731857377879 2024-11-17T15:29:38,204 DEBUG [RS:0;7a780d55532c:35033 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a780d55532c,35033,1731857377879' 2024-11-17T15:29:38,204 DEBUG [RS:0;7a780d55532c:35033 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-17T15:29:38,205 DEBUG [RS:0;7a780d55532c:35033 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-17T15:29:38,206 DEBUG [RS:0;7a780d55532c:35033 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-17T15:29:38,206 DEBUG [RS:0;7a780d55532c:35033 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-17T15:29:38,206 DEBUG [RS:0;7a780d55532c:35033 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7a780d55532c,35033,1731857377879 2024-11-17T15:29:38,206 DEBUG [RS:0;7a780d55532c:35033 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a780d55532c,35033,1731857377879' 2024-11-17T15:29:38,206 DEBUG [RS:0;7a780d55532c:35033 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-17T15:29:38,206 DEBUG [RS:0;7a780d55532c:35033 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-17T15:29:38,207 DEBUG [RS:0;7a780d55532c:35033 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-17T15:29:38,207 INFO [RS:0;7a780d55532c:35033 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-17T15:29:38,207 INFO [RS:0;7a780d55532c:35033 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-17T15:29:38,309 INFO [RS:0;7a780d55532c:35033 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a780d55532c%2C35033%2C1731857377879, suffix=, logDir=hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/WALs/7a780d55532c,35033,1731857377879, archiveDir=hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/oldWALs, maxLogs=32 2024-11-17T15:29:38,311 INFO [RS:0;7a780d55532c:35033 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C35033%2C1731857377879.1731857378311 2024-11-17T15:29:38,319 INFO [RS:0;7a780d55532c:35033 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/WALs/7a780d55532c,35033,1731857377879/7a780d55532c%2C35033%2C1731857377879.1731857378311 2024-11-17T15:29:38,324 WARN [7a780d55532c:37363 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
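The "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" line above is derived from a few RegionServer settings; the sketch below names the likely keys, with rollsize being blocksize times the roll multiplier. The key names are quoted from memory of the WAL code rather than from this log, so treat them as assumptions, and WalConfigSketch is a made-up class name.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);  // 256 MB * 0.5 = 128 MB rollsize
        conf.setInt("hbase.regionserver.maxlogs", 32);
      }
    }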
2024-11-17T15:29:38,324 DEBUG [RS:0;7a780d55532c:35033 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36233:36233),(127.0.0.1/127.0.0.1:36219:36219)] 2024-11-17T15:29:38,574 DEBUG [7a780d55532c:37363 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-17T15:29:38,575 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7a780d55532c,35033,1731857377879 2024-11-17T15:29:38,577 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7a780d55532c,35033,1731857377879, state=OPENING 2024-11-17T15:29:38,579 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-17T15:29:38,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35033-0x101268c549f0001, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:29:38,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37363-0x101268c549f0000, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:29:38,581 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T15:29:38,581 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T15:29:38,581 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7a780d55532c,35033,1731857377879}] 2024-11-17T15:29:38,582 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T15:29:38,735 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-17T15:29:38,738 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33121, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-17T15:29:38,743 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-17T15:29:38,743 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T15:29:38,746 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a780d55532c%2C35033%2C1731857377879.meta, suffix=.meta, logDir=hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/WALs/7a780d55532c,35033,1731857377879, archiveDir=hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/oldWALs, maxLogs=32 2024-11-17T15:29:38,748 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 
{event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C35033%2C1731857377879.meta.1731857378747.meta 2024-11-17T15:29:38,755 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/WALs/7a780d55532c,35033,1731857377879/7a780d55532c%2C35033%2C1731857377879.meta.1731857378747.meta 2024-11-17T15:29:38,756 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36219:36219),(127.0.0.1/127.0.0.1:36233:36233)] 2024-11-17T15:29:38,757 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-17T15:29:38,757 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-17T15:29:38,757 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-17T15:29:38,757 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-17T15:29:38,758 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-17T15:29:38,758 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:29:38,758 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-17T15:29:38,758 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-17T15:29:38,760 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T15:29:38,761 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T15:29:38,761 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:29:38,762 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:29:38,762 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T15:29:38,763 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T15:29:38,763 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:29:38,763 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:29:38,764 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T15:29:38,764 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T15:29:38,764 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:29:38,765 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): 
Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:29:38,765 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T15:29:38,766 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T15:29:38,766 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:29:38,766 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:29:38,767 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T15:29:38,767 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/data/hbase/meta/1588230740 2024-11-17T15:29:38,769 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/data/hbase/meta/1588230740 2024-11-17T15:29:38,770 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T15:29:38,770 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T15:29:38,771 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
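Each CompactionConfiguration line above prints the same per-family defaults (minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0, a weekly major compaction period with 0.5 jitter). Below is a hedged sketch mapping those numbers to their usual configuration keys; the values set here simply restate the logged defaults, and CompactionConfigSketch is a made-up class name.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);                // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);               // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);         // selection ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f); // off-peak ratio
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);    // major period, 7 days in ms
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);  // major jitter
      }
    }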
2024-11-17T15:29:38,773 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T15:29:38,774 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=882405, jitterRate=0.12203632295131683}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T15:29:38,774 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-17T15:29:38,775 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731857378758Writing region info on filesystem at 1731857378758Initializing all the Stores at 1731857378759 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857378759Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857378759Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857378759Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857378759Cleaning up temporary data from old regions at 1731857378770 (+11 ms)Running coprocessor post-open hooks at 1731857378774 (+4 ms)Region opened successfully at 1731857378775 (+1 ms) 2024-11-17T15:29:38,776 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731857378735 2024-11-17T15:29:38,779 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-17T15:29:38,779 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-17T15:29:38,780 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=7a780d55532c,35033,1731857377879 2024-11-17T15:29:38,783 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7a780d55532c,35033,1731857377879, state=OPEN 2024-11-17T15:29:38,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35033-0x101268c549f0001, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T15:29:38,789 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7a780d55532c,35033,1731857377879 2024-11-17T15:29:38,789 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T15:29:38,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37363-0x101268c549f0000, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T15:29:38,790 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T15:29:38,793 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-17T15:29:38,793 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7a780d55532c,35033,1731857377879 in 208 msec 2024-11-17T15:29:38,796 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-17T15:29:38,796 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 624 msec 2024-11-17T15:29:38,797 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T15:29:38,798 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-17T15:29:38,800 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T15:29:38,800 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7a780d55532c,35033,1731857377879, seqNum=-1] 2024-11-17T15:29:38,800 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T15:29:38,802 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50101, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T15:29:38,809 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 703 msec 2024-11-17T15:29:38,809 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731857378809, completionTime=-1 2024-11-17T15:29:38,809 INFO 
[master/7a780d55532c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-17T15:29:38,809 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-17T15:29:38,811 INFO [master/7a780d55532c:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-17T15:29:38,811 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731857438811 2024-11-17T15:29:38,812 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731857498812 2024-11-17T15:29:38,812 INFO [master/7a780d55532c:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-17T15:29:38,812 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,37363,1731857377825-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:38,812 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,37363,1731857377825-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:38,812 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,37363,1731857377825-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:38,812 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7a780d55532c:37363, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:38,812 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:38,812 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:38,815 DEBUG [master/7a780d55532c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-17T15:29:38,818 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.890sec 2024-11-17T15:29:38,818 INFO [master/7a780d55532c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-17T15:29:38,818 INFO [master/7a780d55532c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-17T15:29:38,818 INFO [master/7a780d55532c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-17T15:29:38,818 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
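"The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=..., seqNum=-1]" above is the client-side registry lookup. A minimal sketch of performing the same lookup through the public client API follows, assuming connection settings come from an hbase-site.xml on the classpath; MetaLocationSketch is a made-up class name.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLocationSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          // Resolve the region holding the empty start row of hbase:meta.
          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""));
          System.out.println("meta is on " + loc.getServerName());
        }
      }
    }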
2024-11-17T15:29:38,818 INFO [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-17T15:29:38,818 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,37363,1731857377825-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T15:29:38,818 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,37363,1731857377825-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-17T15:29:38,822 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-17T15:29:38,822 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-17T15:29:38,822 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,37363,1731857377825-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:38,907 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14cc45e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T15:29:38,907 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7a780d55532c,37363,-1 for getting cluster id 2024-11-17T15:29:38,907 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-17T15:29:38,909 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '4cd2834f-a72a-4c19-8ef1-df5a9e6b7950' 2024-11-17T15:29:38,910 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-17T15:29:38,910 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "4cd2834f-a72a-4c19-8ef1-df5a9e6b7950" 2024-11-17T15:29:38,910 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72aebd01, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T15:29:38,910 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7a780d55532c,37363,-1] 2024-11-17T15:29:38,911 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-17T15:29:38,911 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:29:38,913 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56466, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-17T15:29:38,914 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50d05b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T15:29:38,915 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T15:29:38,916 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7a780d55532c,35033,1731857377879, seqNum=-1] 2024-11-17T15:29:38,917 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T15:29:38,918 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52900, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T15:29:38,920 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7a780d55532c,37363,1731857377825 2024-11-17T15:29:38,921 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:29:38,925 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-17T15:29:38,925 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-17T15:29:38,925 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-17T15:29:38,926 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T15:29:38,926 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:29:38,926 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:29:38,926 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-17T15:29:38,926 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-17T15:29:38,926 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=292436729, stopped=false 2024-11-17T15:29:38,926 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7a780d55532c,37363,1731857377825 2024-11-17T15:29:38,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35033-0x101268c549f0001, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T15:29:38,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37363-0x101268c549f0000, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T15:29:38,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35033-0x101268c549f0001, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:29:38,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37363-0x101268c549f0000, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:29:38,929 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T15:29:38,929 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-17T15:29:38,929 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T15:29:38,929 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:29:38,930 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37363-0x101268c549f0000, quorum=127.0.0.1:61433, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T15:29:38,930 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7a780d55532c,35033,1731857377879' ***** 2024-11-17T15:29:38,930 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-17T15:29:38,930 INFO [RS:0;7a780d55532c:35033 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-17T15:29:38,930 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35033-0x101268c549f0001, quorum=127.0.0.1:61433, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T15:29:38,930 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-17T15:29:38,931 INFO [RS:0;7a780d55532c:35033 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-17T15:29:38,931 INFO [RS:0;7a780d55532c:35033 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-17T15:29:38,931 INFO [RS:0;7a780d55532c:35033 {}] regionserver.HRegionServer(959): stopping server 7a780d55532c,35033,1731857377879 2024-11-17T15:29:38,931 INFO [RS:0;7a780d55532c:35033 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T15:29:38,931 INFO [RS:0;7a780d55532c:35033 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7a780d55532c:35033. 2024-11-17T15:29:38,931 DEBUG [RS:0;7a780d55532c:35033 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T15:29:38,931 DEBUG [RS:0;7a780d55532c:35033 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:29:38,931 INFO [RS:0;7a780d55532c:35033 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-17T15:29:38,931 INFO [RS:0;7a780d55532c:35033 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-17T15:29:38,931 INFO [RS:0;7a780d55532c:35033 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-17T15:29:38,931 INFO [RS:0;7a780d55532c:35033 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-17T15:29:38,932 INFO [RS:0;7a780d55532c:35033 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-17T15:29:38,932 DEBUG [RS:0;7a780d55532c:35033 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-17T15:29:38,932 DEBUG [RS:0;7a780d55532c:35033 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-17T15:29:38,932 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T15:29:38,932 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T15:29:38,932 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T15:29:38,932 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T15:29:38,932 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T15:29:38,932 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-17T15:29:38,951 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/data/hbase/meta/1588230740/.tmp/ns/729ac1e1d686488ba1387639037930ad is 43, key is default/ns:d/1731857378802/Put/seqid=0 2024-11-17T15:29:38,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37601 is added to blk_1073741835_1011 (size=5153) 2024-11-17T15:29:38,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42953 is added to blk_1073741835_1011 (size=5153) 2024-11-17T15:29:38,959 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/data/hbase/meta/1588230740/.tmp/ns/729ac1e1d686488ba1387639037930ad 2024-11-17T15:29:38,968 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/data/hbase/meta/1588230740/.tmp/ns/729ac1e1d686488ba1387639037930ad as hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/data/hbase/meta/1588230740/ns/729ac1e1d686488ba1387639037930ad 2024-11-17T15:29:38,975 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/data/hbase/meta/1588230740/ns/729ac1e1d686488ba1387639037930ad, entries=2, sequenceid=6, filesize=5.0 K 2024-11-17T15:29:38,977 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 45ms, sequenceid=6, compaction requested=false 2024-11-17T15:29:38,982 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-17T15:29:38,982 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T15:29:38,983 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T15:29:38,983 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731857378932Running coprocessor pre-close hooks at 1731857378932Disabling compacts and flushes for region at 1731857378932Disabling writes for close at 1731857378932Obtaining lock to block concurrent updates at 1731857378932Preparing flush snapshotting stores in 1588230740 at 1731857378932Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731857378933 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731857378934 (+1 ms)Flushing 1588230740/ns: creating writer at 1731857378934Flushing 1588230740/ns: appending metadata at 1731857378950 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1731857378951 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6de0a9d3: reopening flushed file at 1731857378967 (+16 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 45ms, sequenceid=6, compaction requested=false at 1731857378977 (+10 ms)Writing region close event to WAL at 1731857378978 (+1 ms)Running coprocessor post-close hooks at 1731857378982 (+4 ms)Closed at 1731857378983 (+1 ms) 2024-11-17T15:29:38,983 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-17T15:29:39,132 INFO [RS:0;7a780d55532c:35033 {}] regionserver.HRegionServer(976): stopping server 7a780d55532c,35033,1731857377879; all regions closed. 
2024-11-17T15:29:39,133 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:39,133 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:39,133 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:39,133 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:39,133 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:39,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37601 is added to blk_1073741834_1010 (size=1152) 2024-11-17T15:29:39,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42953 is added to blk_1073741834_1010 (size=1152) 2024-11-17T15:29:39,140 DEBUG [RS:0;7a780d55532c:35033 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/oldWALs 2024-11-17T15:29:39,140 INFO [RS:0;7a780d55532c:35033 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7a780d55532c%2C35033%2C1731857377879.meta:.meta(num 1731857378747) 2024-11-17T15:29:39,140 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:39,140 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:39,141 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:39,141 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:39,141 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:39,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42953 is added to blk_1073741833_1009 (size=93) 2024-11-17T15:29:39,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37601 is added to blk_1073741833_1009 (size=93) 2024-11-17T15:29:39,147 DEBUG [RS:0;7a780d55532c:35033 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/oldWALs 2024-11-17T15:29:39,147 INFO [RS:0;7a780d55532c:35033 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7a780d55532c%2C35033%2C1731857377879:(num 1731857378311) 2024-11-17T15:29:39,147 DEBUG [RS:0;7a780d55532c:35033 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:29:39,147 INFO [RS:0;7a780d55532c:35033 {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T15:29:39,147 INFO [RS:0;7a780d55532c:35033 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T15:29:39,147 INFO [RS:0;7a780d55532c:35033 {}] hbase.ChoreService(370): Chore service for: regionserver/7a780d55532c:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-17T15:29:39,148 INFO [RS:0;7a780d55532c:35033 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T15:29:39,148 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-17T15:29:39,148 INFO [RS:0;7a780d55532c:35033 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35033 2024-11-17T15:29:39,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37363-0x101268c549f0000, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T15:29:39,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35033-0x101268c549f0001, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7a780d55532c,35033,1731857377879 2024-11-17T15:29:39,150 INFO [RS:0;7a780d55532c:35033 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T15:29:39,151 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7a780d55532c,35033,1731857377879] 2024-11-17T15:29:39,154 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7a780d55532c,35033,1731857377879 already deleted, retry=false 2024-11-17T15:29:39,154 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7a780d55532c,35033,1731857377879 expired; onlineServers=0 2024-11-17T15:29:39,154 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7a780d55532c,37363,1731857377825' ***** 2024-11-17T15:29:39,154 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-17T15:29:39,154 INFO [M:0;7a780d55532c:37363 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T15:29:39,154 INFO [M:0;7a780d55532c:37363 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T15:29:39,155 DEBUG [M:0;7a780d55532c:37363 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-17T15:29:39,155 DEBUG [M:0;7a780d55532c:37363 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-17T15:29:39,155 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-17T15:29:39,155 DEBUG [master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.large.0-1731857378110 {}] cleaner.HFileCleaner(306): Exit Thread[master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.large.0-1731857378110,5,FailOnTimeoutGroup] 2024-11-17T15:29:39,155 INFO [M:0;7a780d55532c:37363 {}] hbase.ChoreService(370): Chore service for: master/7a780d55532c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-17T15:29:39,155 DEBUG [master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.small.0-1731857378111 {}] cleaner.HFileCleaner(306): Exit Thread[master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.small.0-1731857378111,5,FailOnTimeoutGroup] 2024-11-17T15:29:39,155 INFO [M:0;7a780d55532c:37363 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T15:29:39,155 DEBUG [M:0;7a780d55532c:37363 {}] master.HMaster(1795): Stopping service threads 2024-11-17T15:29:39,155 INFO [M:0;7a780d55532c:37363 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-17T15:29:39,155 INFO [M:0;7a780d55532c:37363 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T15:29:39,156 INFO [M:0;7a780d55532c:37363 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-17T15:29:39,156 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-17T15:29:39,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37363-0x101268c549f0000, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-17T15:29:39,157 DEBUG [M:0;7a780d55532c:37363 {}] zookeeper.ZKUtil(347): master:37363-0x101268c549f0000, quorum=127.0.0.1:61433, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-17T15:29:39,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37363-0x101268c549f0000, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:29:39,157 WARN [M:0;7a780d55532c:37363 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-17T15:29:39,158 INFO [M:0;7a780d55532c:37363 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/.lastflushedseqids 2024-11-17T15:29:39,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37601 is added to blk_1073741836_1012 (size=99) 2024-11-17T15:29:39,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42953 is added to blk_1073741836_1012 (size=99) 2024-11-17T15:29:39,168 INFO [M:0;7a780d55532c:37363 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-17T15:29:39,168 INFO [M:0;7a780d55532c:37363 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-17T15:29:39,168 DEBUG [M:0;7a780d55532c:37363 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T15:29:39,168 INFO [M:0;7a780d55532c:37363 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:29:39,168 DEBUG [M:0;7a780d55532c:37363 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:29:39,168 DEBUG [M:0;7a780d55532c:37363 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T15:29:39,168 DEBUG [M:0;7a780d55532c:37363 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:29:39,169 INFO [M:0;7a780d55532c:37363 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-17T15:29:39,187 DEBUG [M:0;7a780d55532c:37363 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ed40d02220764fcdaa74781777b6b3c1 is 82, key is hbase:meta,,1/info:regioninfo/1731857378780/Put/seqid=0 2024-11-17T15:29:39,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42953 is added to blk_1073741837_1013 (size=5672) 2024-11-17T15:29:39,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37601 is added to blk_1073741837_1013 (size=5672) 2024-11-17T15:29:39,198 INFO [M:0;7a780d55532c:37363 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ed40d02220764fcdaa74781777b6b3c1 2024-11-17T15:29:39,223 DEBUG [M:0;7a780d55532c:37363 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/11efd1e84959434cbb7df06dd22c942f is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731857378808/Put/seqid=0 2024-11-17T15:29:39,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42953 is added to blk_1073741838_1014 (size=5275) 2024-11-17T15:29:39,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37601 is added to blk_1073741838_1014 (size=5275) 2024-11-17T15:29:39,230 INFO [M:0;7a780d55532c:37363 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/11efd1e84959434cbb7df06dd22c942f 2024-11-17T15:29:39,253 INFO [RS:0;7a780d55532c:35033 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T15:29:39,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35033-0x101268c549f0001, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T15:29:39,253 INFO [RS:0;7a780d55532c:35033 {}] regionserver.HRegionServer(1031): Exiting; stopping=7a780d55532c,35033,1731857377879; zookeeper connection closed. 
2024-11-17T15:29:39,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35033-0x101268c549f0001, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T15:29:39,253 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@56d4d339 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@56d4d339 2024-11-17T15:29:39,253 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-17T15:29:39,257 DEBUG [M:0;7a780d55532c:37363 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/37a07e18d4754be7bf3cb32a7381d913 is 69, key is 7a780d55532c,35033,1731857377879/rs:state/1731857378139/Put/seqid=0 2024-11-17T15:29:39,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37601 is added to blk_1073741839_1015 (size=5156) 2024-11-17T15:29:39,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42953 is added to blk_1073741839_1015 (size=5156) 2024-11-17T15:29:39,264 INFO [M:0;7a780d55532c:37363 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/37a07e18d4754be7bf3cb32a7381d913 2024-11-17T15:29:39,288 DEBUG [M:0;7a780d55532c:37363 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/97afe44b241b48e793c9630e9da0cb16 is 52, key is load_balancer_on/state:d/1731857378923/Put/seqid=0 2024-11-17T15:29:39,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37601 is added to blk_1073741840_1016 (size=5056) 2024-11-17T15:29:39,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42953 is added to blk_1073741840_1016 (size=5056) 2024-11-17T15:29:39,295 INFO [M:0;7a780d55532c:37363 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/97afe44b241b48e793c9630e9da0cb16 2024-11-17T15:29:39,303 DEBUG [M:0;7a780d55532c:37363 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ed40d02220764fcdaa74781777b6b3c1 as hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ed40d02220764fcdaa74781777b6b3c1 2024-11-17T15:29:39,310 INFO [M:0;7a780d55532c:37363 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ed40d02220764fcdaa74781777b6b3c1, entries=8, sequenceid=29, 
filesize=5.5 K 2024-11-17T15:29:39,311 DEBUG [M:0;7a780d55532c:37363 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/11efd1e84959434cbb7df06dd22c942f as hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/11efd1e84959434cbb7df06dd22c942f 2024-11-17T15:29:39,318 INFO [M:0;7a780d55532c:37363 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/11efd1e84959434cbb7df06dd22c942f, entries=3, sequenceid=29, filesize=5.2 K 2024-11-17T15:29:39,320 DEBUG [M:0;7a780d55532c:37363 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/37a07e18d4754be7bf3cb32a7381d913 as hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/37a07e18d4754be7bf3cb32a7381d913 2024-11-17T15:29:39,329 INFO [M:0;7a780d55532c:37363 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/37a07e18d4754be7bf3cb32a7381d913, entries=1, sequenceid=29, filesize=5.0 K 2024-11-17T15:29:39,330 DEBUG [M:0;7a780d55532c:37363 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/97afe44b241b48e793c9630e9da0cb16 as hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/97afe44b241b48e793c9630e9da0cb16 2024-11-17T15:29:39,337 INFO [M:0;7a780d55532c:37363 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45951/user/jenkins/test-data/09a5cdeb-c5fb-325c-2580-503688050361/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/97afe44b241b48e793c9630e9da0cb16, entries=1, sequenceid=29, filesize=4.9 K 2024-11-17T15:29:39,338 INFO [M:0;7a780d55532c:37363 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 170ms, sequenceid=29, compaction requested=false 2024-11-17T15:29:39,340 INFO [M:0;7a780d55532c:37363 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-17T15:29:39,340 DEBUG [M:0;7a780d55532c:37363 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731857379168Disabling compacts and flushes for region at 1731857379168Disabling writes for close at 1731857379168Obtaining lock to block concurrent updates at 1731857379169 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731857379169Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731857379169Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731857379170 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731857379170Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731857379186 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731857379187 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731857379205 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731857379222 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731857379222Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731857379236 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731857379256 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731857379257 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731857379270 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731857379288 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731857379288Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@349d69a4: reopening flushed file at 1731857379302 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6a5a5ecd: reopening flushed file at 1731857379310 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2bd6162a: reopening flushed file at 1731857379318 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@bd76caf: reopening flushed file at 1731857379329 (+11 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 170ms, sequenceid=29, compaction requested=false at 1731857379338 (+9 ms)Writing region close event to WAL at 1731857379340 (+2 ms)Closed at 1731857379340 2024-11-17T15:29:39,342 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:39,342 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:39,342 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:39,342 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:39,343 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:39,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37601 is added to blk_1073741830_1006 (size=10311) 2024-11-17T15:29:39,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42953 is added to blk_1073741830_1006 (size=10311) 2024-11-17T15:29:39,348 INFO [M:0;7a780d55532c:37363 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-17T15:29:39,348 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-17T15:29:39,348 INFO [M:0;7a780d55532c:37363 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37363 2024-11-17T15:29:39,348 INFO [M:0;7a780d55532c:37363 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T15:29:39,450 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37363-0x101268c549f0000, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T15:29:39,450 INFO [M:0;7a780d55532c:37363 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T15:29:39,450 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37363-0x101268c549f0000, quorum=127.0.0.1:61433, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T15:29:39,456 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@9acc548{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:29:39,456 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3509b1e6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T15:29:39,456 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T15:29:39,457 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4e477227{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T15:29:39,457 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@113235ce{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/hadoop.log.dir/,STOPPED} 2024-11-17T15:29:39,458 WARN [BP-417743486-172.17.0.2-1731857377077 heartbeating to localhost/127.0.0.1:45951 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T15:29:39,458 WARN [BP-417743486-172.17.0.2-1731857377077 heartbeating to localhost/127.0.0.1:45951 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-417743486-172.17.0.2-1731857377077 (Datanode Uuid 94407f10-db15-430f-9b07-a7fefda7508a) service to localhost/127.0.0.1:45951 2024-11-17T15:29:39,458 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T15:29:39,458 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T15:29:39,459 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/cluster_38d30a54-7e15-888c-9b9b-2d7e57dc3145/data/data3/current/BP-417743486-172.17.0.2-1731857377077 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:29:39,460 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/cluster_38d30a54-7e15-888c-9b9b-2d7e57dc3145/data/data4/current/BP-417743486-172.17.0.2-1731857377077 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:29:39,460 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T15:29:39,462 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5b15f3df{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:29:39,462 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@72a59bd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T15:29:39,462 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T15:29:39,463 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@39f44d3e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T15:29:39,463 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4de0900d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/hadoop.log.dir/,STOPPED} 2024-11-17T15:29:39,464 WARN [BP-417743486-172.17.0.2-1731857377077 heartbeating to localhost/127.0.0.1:45951 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T15:29:39,464 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T15:29:39,464 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T15:29:39,464 WARN [BP-417743486-172.17.0.2-1731857377077 heartbeating to localhost/127.0.0.1:45951 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-417743486-172.17.0.2-1731857377077 (Datanode Uuid a0305420-1d88-496f-b2e3-36dd0f2419d2) service to localhost/127.0.0.1:45951 2024-11-17T15:29:39,465 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/cluster_38d30a54-7e15-888c-9b9b-2d7e57dc3145/data/data1/current/BP-417743486-172.17.0.2-1731857377077 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:29:39,465 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/cluster_38d30a54-7e15-888c-9b9b-2d7e57dc3145/data/data2/current/BP-417743486-172.17.0.2-1731857377077 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:29:39,465 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T15:29:39,471 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@53521f59{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T15:29:39,471 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2fbdac8b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T15:29:39,472 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T15:29:39,472 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7a8c06e3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T15:29:39,472 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a54e173{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/hadoop.log.dir/,STOPPED} 2024-11-17T15:29:39,478 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-17T15:29:39,495 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-17T15:29:39,495 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-17T15:29:39,495 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/hadoop.log.dir so I do NOT create it in target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b 2024-11-17T15:29:39,495 INFO [Time-limited 
test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c44a3ad2-979a-e73e-c6ad-31c6566e1a89/hadoop.tmp.dir so I do NOT create it in target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b 2024-11-17T15:29:39,495 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca, deleteOnExit=true 2024-11-17T15:29:39,495 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-17T15:29:39,496 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/test.cache.data in system properties and HBase conf 2024-11-17T15:29:39,496 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/hadoop.tmp.dir in system properties and HBase conf 2024-11-17T15:29:39,496 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/hadoop.log.dir in system properties and HBase conf 2024-11-17T15:29:39,496 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-17T15:29:39,496 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-17T15:29:39,496 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-17T15:29:39,496 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-17T15:29:39,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-17T15:29:39,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-17T15:29:39,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-17T15:29:39,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T15:29:39,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-17T15:29:39,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-17T15:29:39,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T15:29:39,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T15:29:39,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-17T15:29:39,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/nfs.dump.dir in system properties and HBase conf 2024-11-17T15:29:39,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/java.io.tmpdir in system properties and HBase conf 2024-11-17T15:29:39,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T15:29:39,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-17T15:29:39,498 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-17T15:29:39,512 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T15:29:39,587 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:29:39,592 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T15:29:39,593 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T15:29:39,593 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T15:29:39,593 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T15:29:39,594 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:29:39,594 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@302502f5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/hadoop.log.dir/,AVAILABLE} 2024-11-17T15:29:39,595 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6cef6566{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T15:29:39,711 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@165c5bf1{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/java.io.tmpdir/jetty-localhost-40735-hadoop-hdfs-3_4_1-tests_jar-_-any-10801137872529285906/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T15:29:39,711 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@73e293ca{HTTP/1.1, (http/1.1)}{localhost:40735} 2024-11-17T15:29:39,711 INFO [Time-limited test {}] server.Server(415): Started @103633ms 2024-11-17T15:29:39,725 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T15:29:39,806 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:29:39,810 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T15:29:39,811 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T15:29:39,811 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T15:29:39,811 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T15:29:39,811 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a95d0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/hadoop.log.dir/,AVAILABLE} 2024-11-17T15:29:39,812 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6b94495b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T15:29:39,948 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@295bc59e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/java.io.tmpdir/jetty-localhost-43181-hadoop-hdfs-3_4_1-tests_jar-_-any-6239087581356721533/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:29:39,949 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@517c4920{HTTP/1.1, (http/1.1)}{localhost:43181} 2024-11-17T15:29:39,949 INFO [Time-limited test {}] server.Server(415): Started @103871ms 2024-11-17T15:29:39,952 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T15:29:40,029 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:29:40,033 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T15:29:40,038 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T15:29:40,038 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T15:29:40,038 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T15:29:40,039 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5c5cbc59{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/hadoop.log.dir/,AVAILABLE} 2024-11-17T15:29:40,039 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@56aeb798{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T15:29:40,089 WARN [Thread-656 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data1/current/BP-398811610-172.17.0.2-1731857379530/current, will proceed with Du for space computation calculation, 2024-11-17T15:29:40,089 WARN [Thread-657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data2/current/BP-398811610-172.17.0.2-1731857379530/current, will proceed with Du for space computation calculation, 2024-11-17T15:29:40,117 WARN [Thread-635 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T15:29:40,120 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4c8377d618cb3993 with lease ID 0x6bf99bb86d5c7785: Processing first storage report for DS-afe86fda-d881-4740-8700-829d80269652 from datanode DatanodeRegistration(127.0.0.1:34067, datanodeUuid=59b0f98c-0267-4075-b69d-ab99564b63dc, infoPort=37089, infoSecurePort=0, ipcPort=38563, storageInfo=lv=-57;cid=testClusterID;nsid=378286705;c=1731857379530) 2024-11-17T15:29:40,121 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4c8377d618cb3993 with lease ID 0x6bf99bb86d5c7785: from storage DS-afe86fda-d881-4740-8700-829d80269652 node DatanodeRegistration(127.0.0.1:34067, datanodeUuid=59b0f98c-0267-4075-b69d-ab99564b63dc, infoPort=37089, infoSecurePort=0, ipcPort=38563, storageInfo=lv=-57;cid=testClusterID;nsid=378286705;c=1731857379530), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-17T15:29:40,121 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4c8377d618cb3993 with lease ID 0x6bf99bb86d5c7785: Processing first storage report for DS-78312a04-f064-4c90-978a-d46e9019b10e from datanode DatanodeRegistration(127.0.0.1:34067, datanodeUuid=59b0f98c-0267-4075-b69d-ab99564b63dc, infoPort=37089, infoSecurePort=0, ipcPort=38563, storageInfo=lv=-57;cid=testClusterID;nsid=378286705;c=1731857379530) 2024-11-17T15:29:40,121 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4c8377d618cb3993 with lease ID 0x6bf99bb86d5c7785: from storage DS-78312a04-f064-4c90-978a-d46e9019b10e node DatanodeRegistration(127.0.0.1:34067, datanodeUuid=59b0f98c-0267-4075-b69d-ab99564b63dc, infoPort=37089, infoSecurePort=0, ipcPort=38563, storageInfo=lv=-57;cid=testClusterID;nsid=378286705;c=1731857379530), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:29:40,164 INFO [regionserver/7a780d55532c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T15:29:40,172 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@707b5b0d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/java.io.tmpdir/jetty-localhost-46449-hadoop-hdfs-3_4_1-tests_jar-_-any-17656773882562668726/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:29:40,172 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5ff23317{HTTP/1.1, (http/1.1)}{localhost:46449} 2024-11-17T15:29:40,172 INFO [Time-limited test {}] server.Server(415): Started @104094ms 2024-11-17T15:29:40,174 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
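The entries above record HBaseTestingUtil tearing down one mini cluster and bringing up another with StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}, which starts the MiniDFS DataNodes, Jetty endpoints and MiniZooKeeperCluster logged here. A minimal sketch of the kind of test code that drives this sequence is shown below; the builder method names and the overall shape are assumptions based on the public HBase 3.x testing API, not the source of this particular test.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    // Illustrative sketch only: mirrors the StartMiniClusterOption fields printed in the log
    // (numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1); the builder method
    // names are assumptions based on the public HBase 3.x testing API.
    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)        // one HMaster
            .numRegionServers(1)  // one RegionServer
            .numDataNodes(2)      // two HDFS DataNodes, as in the block reports above
            .numZkServers(1)      // one MiniZooKeeperCluster server
            .build();
        util.startMiniCluster(option);   // produces DFS/Jetty/ZK startup lines like those above
        try {
          // test body would run against the mini cluster here
        } finally {
          util.shutdownMiniCluster();    // produces the "Minicluster is down" teardown lines
        }
      }
    }

In the logged run the harness also repoints hadoop.log.dir, hadoop.tmp.dir and the other listed properties into a fresh per-test data directory before DFS starts.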
2024-11-17T15:29:40,305 WARN [Thread-682 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data3/current/BP-398811610-172.17.0.2-1731857379530/current, will proceed with Du for space computation calculation, 2024-11-17T15:29:40,305 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data4/current/BP-398811610-172.17.0.2-1731857379530/current, will proceed with Du for space computation calculation, 2024-11-17T15:29:40,323 WARN [Thread-671 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-17T15:29:40,326 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xee92f5435fea43b1 with lease ID 0x6bf99bb86d5c7786: Processing first storage report for DS-e57b16a9-d350-4424-9cda-497c30bb93e9 from datanode DatanodeRegistration(127.0.0.1:41873, datanodeUuid=cad15bd0-c8d1-4c46-ba45-a08acfe53d98, infoPort=33713, infoSecurePort=0, ipcPort=39929, storageInfo=lv=-57;cid=testClusterID;nsid=378286705;c=1731857379530) 2024-11-17T15:29:40,326 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xee92f5435fea43b1 with lease ID 0x6bf99bb86d5c7786: from storage DS-e57b16a9-d350-4424-9cda-497c30bb93e9 node DatanodeRegistration(127.0.0.1:41873, datanodeUuid=cad15bd0-c8d1-4c46-ba45-a08acfe53d98, infoPort=33713, infoSecurePort=0, ipcPort=39929, storageInfo=lv=-57;cid=testClusterID;nsid=378286705;c=1731857379530), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:29:40,326 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xee92f5435fea43b1 with lease ID 0x6bf99bb86d5c7786: Processing first storage report for DS-c5886253-11c7-46b4-880b-716675bbbc50 from datanode DatanodeRegistration(127.0.0.1:41873, datanodeUuid=cad15bd0-c8d1-4c46-ba45-a08acfe53d98, infoPort=33713, infoSecurePort=0, ipcPort=39929, storageInfo=lv=-57;cid=testClusterID;nsid=378286705;c=1731857379530) 2024-11-17T15:29:40,327 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xee92f5435fea43b1 with lease ID 0x6bf99bb86d5c7786: from storage DS-c5886253-11c7-46b4-880b-716675bbbc50 node DatanodeRegistration(127.0.0.1:41873, datanodeUuid=cad15bd0-c8d1-4c46-ba45-a08acfe53d98, infoPort=33713, infoSecurePort=0, ipcPort=39929, storageInfo=lv=-57;cid=testClusterID;nsid=378286705;c=1731857379530), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:29:40,402 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b 2024-11-17T15:29:40,404 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/zookeeper_0, clientPort=53267, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-17T15:29:40,405 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53267 2024-11-17T15:29:40,406 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:29:40,408 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:29:40,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34067 is added to blk_1073741825_1001 (size=7) 2024-11-17T15:29:40,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41873 is added to blk_1073741825_1001 (size=7) 2024-11-17T15:29:40,419 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac with version=8 2024-11-17T15:29:40,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/hbase-staging 2024-11-17T15:29:40,422 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7a780d55532c:0 server-side Connection retries=45 2024-11-17T15:29:40,422 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T15:29:40,422 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T15:29:40,422 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T15:29:40,422 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T15:29:40,422 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T15:29:40,423 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-17T15:29:40,423 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T15:29:40,424 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33923 2024-11-17T15:29:40,425 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33923 connecting to ZooKeeper ensemble=127.0.0.1:53267 2024-11-17T15:29:40,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:339230x0, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T15:29:40,438 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33923-0x101268c5ec30000 connected 2024-11-17T15:29:40,466 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:29:40,468 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:29:40,471 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33923-0x101268c5ec30000, quorum=127.0.0.1:53267, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T15:29:40,471 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac, hbase.cluster.distributed=false 2024-11-17T15:29:40,473 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33923-0x101268c5ec30000, quorum=127.0.0.1:53267, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T15:29:40,483 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33923 2024-11-17T15:29:40,483 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33923 2024-11-17T15:29:40,484 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33923 2024-11-17T15:29:40,488 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33923 2024-11-17T15:29:40,490 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33923 2024-11-17T15:29:40,513 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7a780d55532c:0 server-side Connection retries=45 2024-11-17T15:29:40,513 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T15:29:40,514 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T15:29:40,514 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T15:29:40,514 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T15:29:40,514 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T15:29:40,514 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-17T15:29:40,514 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T15:29:40,515 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38057 2024-11-17T15:29:40,518 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38057 connecting to ZooKeeper ensemble=127.0.0.1:53267 2024-11-17T15:29:40,519 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:29:40,522 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:29:40,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:380570x0, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T15:29:40,530 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38057-0x101268c5ec30001 connected 2024-11-17T15:29:40,531 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38057-0x101268c5ec30001, quorum=127.0.0.1:53267, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T15:29:40,531 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-17T15:29:40,536 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-17T15:29:40,537 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38057-0x101268c5ec30001, quorum=127.0.0.1:53267, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-17T15:29:40,539 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38057-0x101268c5ec30001, quorum=127.0.0.1:53267, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T15:29:40,545 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38057 2024-11-17T15:29:40,545 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38057 2024-11-17T15:29:40,545 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38057 2024-11-17T15:29:40,552 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38057 2024-11-17T15:29:40,553 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38057 
2024-11-17T15:29:40,568 DEBUG [M:0;7a780d55532c:33923 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7a780d55532c:33923 2024-11-17T15:29:40,569 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7a780d55532c,33923,1731857380422 2024-11-17T15:29:40,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38057-0x101268c5ec30001, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T15:29:40,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33923-0x101268c5ec30000, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T15:29:40,576 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33923-0x101268c5ec30000, quorum=127.0.0.1:53267, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7a780d55532c,33923,1731857380422 2024-11-17T15:29:40,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38057-0x101268c5ec30001, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-17T15:29:40,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33923-0x101268c5ec30000, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:29:40,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38057-0x101268c5ec30001, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:29:40,579 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33923-0x101268c5ec30000, quorum=127.0.0.1:53267, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-17T15:29:40,579 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7a780d55532c,33923,1731857380422 from backup master directory 2024-11-17T15:29:40,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38057-0x101268c5ec30001, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T15:29:40,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33923-0x101268c5ec30000, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7a780d55532c,33923,1731857380422 2024-11-17T15:29:40,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33923-0x101268c5ec30000, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T15:29:40,584 WARN [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-17T15:29:40,584 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7a780d55532c,33923,1731857380422 2024-11-17T15:29:40,589 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/hbase.id] with ID: 2700cc3b-5d9d-4265-b7be-e4e56f487f4a 2024-11-17T15:29:40,589 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/.tmp/hbase.id 2024-11-17T15:29:40,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41873 is added to blk_1073741826_1002 (size=42) 2024-11-17T15:29:40,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34067 is added to blk_1073741826_1002 (size=42) 2024-11-17T15:29:40,600 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/.tmp/hbase.id]:[hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/hbase.id] 2024-11-17T15:29:40,614 INFO [master/7a780d55532c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:29:40,615 INFO [master/7a780d55532c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-17T15:29:40,616 INFO [master/7a780d55532c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
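The cluster ID steps above (FSUtils creating hbase.id by first writing .tmp/hbase.id and then moving it to its target location) follow a write-to-temporary-then-rename pattern, so a reader of hbase.id never observes a partially written file. The fragment below is only a rough illustration of that pattern using the stock Hadoop FileSystem API; the class name, method and path layout are hypothetical and this is not the actual FSUtils implementation.

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Hypothetical helper illustrating the temp-write-then-rename pattern logged by FSUtils;
    // not the real org.apache.hadoop.hbase.util.FSUtils code.
    public final class ClusterIdFileSketch {
      public static void writeClusterId(Configuration conf, Path rootDir, String clusterId)
          throws Exception {
        FileSystem fs = rootDir.getFileSystem(conf);
        Path idFile = new Path(rootDir, "hbase.id");
        Path tmpFile = new Path(new Path(rootDir, ".tmp"), "hbase.id");
        try (FSDataOutputStream out = fs.create(tmpFile, true)) {
          // write the UUID to the temporary file first
          out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }
        // publish by rename: readers only ever see a complete hbase.id
        if (!fs.rename(tmpFile, idFile)) {
          throw new java.io.IOException("rename " + tmpFile + " -> " + idFile + " failed");
        }
      }
    }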
2024-11-17T15:29:40,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38057-0x101268c5ec30001, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:29:40,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33923-0x101268c5ec30000, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:29:40,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34067 is added to blk_1073741827_1003 (size=196) 2024-11-17T15:29:40,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41873 is added to blk_1073741827_1003 (size=196) 2024-11-17T15:29:40,634 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T15:29:40,635 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-17T15:29:40,636 INFO [master/7a780d55532c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T15:29:40,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34067 is added to blk_1073741828_1004 (size=1189) 2024-11-17T15:29:40,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41873 is added to blk_1073741828_1004 (size=1189) 2024-11-17T15:29:40,657 INFO [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/data/master/store 2024-11-17T15:29:40,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34067 is added to blk_1073741829_1005 (size=34) 2024-11-17T15:29:40,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41873 is added to blk_1073741829_1005 (size=34) 2024-11-17T15:29:40,673 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:29:40,673 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T15:29:40,674 INFO [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:29:40,674 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:29:40,674 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T15:29:40,674 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:29:40,674 INFO [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
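The {NAME => 'info', ...} blocks above are string renderings of the column-family descriptors for the local 'master:store' region. For orientation, the sketch below shows how a family with the same non-default attributes as 'info' could be declared through the public client API; the class and table name are illustrative, and this is not how MasterRegion actually constructs its descriptor.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch of the 'info' family attributes printed in the log, expressed with the public
    // TableDescriptorBuilder / ColumnFamilyDescriptorBuilder API (table name is illustrative).
    public class MasterStoreDescriptorSketch {
      public static TableDescriptor build() {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                               // BLOCKSIZE => '8192 B (8KB)'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master_store_sketch"))
            .setColumnFamily(info)
            .build();
      }
    }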
2024-11-17T15:29:40,674 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731857380673Disabling compacts and flushes for region at 1731857380673Disabling writes for close at 1731857380674 (+1 ms)Writing region close event to WAL at 1731857380674Closed at 1731857380674 2024-11-17T15:29:40,676 WARN [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/data/master/store/.initializing 2024-11-17T15:29:40,676 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/WALs/7a780d55532c,33923,1731857380422 2024-11-17T15:29:40,683 INFO [master/7a780d55532c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a780d55532c%2C33923%2C1731857380422, suffix=, logDir=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/WALs/7a780d55532c,33923,1731857380422, archiveDir=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/oldWALs, maxLogs=10 2024-11-17T15:29:40,684 INFO [master/7a780d55532c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C33923%2C1731857380422.1731857380683 2024-11-17T15:29:40,694 INFO [master/7a780d55532c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/WALs/7a780d55532c,33923,1731857380422/7a780d55532c%2C33923%2C1731857380422.1731857380683 2024-11-17T15:29:40,701 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33713:33713),(127.0.0.1/127.0.0.1:37089:37089)] 2024-11-17T15:29:40,705 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-17T15:29:40,706 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:29:40,706 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:29:40,706 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:29:40,708 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:29:40,711 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-17T15:29:40,711 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:29:40,712 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:29:40,712 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:29:40,714 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-17T15:29:40,714 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:29:40,715 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T15:29:40,715 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:29:40,717 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-17T15:29:40,717 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:29:40,718 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T15:29:40,718 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:29:40,720 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-17T15:29:40,720 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:29:40,721 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T15:29:40,721 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:29:40,722 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:29:40,723 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:29:40,725 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:29:40,725 DEBUG [master/7a780d55532c:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:29:40,726 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-17T15:29:40,728 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:29:40,733 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T15:29:40,734 INFO [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=746988, jitterRate=-0.05015672743320465}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-17T15:29:40,736 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731857380706Initializing all the Stores at 1731857380708 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857380708Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857380708Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857380708Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857380708Cleaning up temporary data from old regions at 1731857380725 (+17 ms)Region opened successfully at 1731857380735 (+10 ms) 2024-11-17T15:29:40,736 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-17T15:29:40,741 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d819f54, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7a780d55532c/172.17.0.2:0 2024-11-17T15:29:40,743 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-17T15:29:40,743 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-17T15:29:40,743 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-17T15:29:40,743 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-17T15:29:40,744 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-17T15:29:40,745 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-17T15:29:40,745 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-17T15:29:40,752 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-17T15:29:40,753 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33923-0x101268c5ec30000, quorum=127.0.0.1:53267, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-17T15:29:40,756 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-17T15:29:40,756 INFO [master/7a780d55532c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-17T15:29:40,757 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33923-0x101268c5ec30000, quorum=127.0.0.1:53267, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-17T15:29:40,760 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-17T15:29:40,760 INFO [master/7a780d55532c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-17T15:29:40,761 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33923-0x101268c5ec30000, quorum=127.0.0.1:53267, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-17T15:29:40,763 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-17T15:29:40,764 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33923-0x101268c5ec30000, quorum=127.0.0.1:53267, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-17T15:29:40,766 DEBUG 
[master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-17T15:29:40,769 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33923-0x101268c5ec30000, quorum=127.0.0.1:53267, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-17T15:29:40,773 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-17T15:29:40,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38057-0x101268c5ec30001, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T15:29:40,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33923-0x101268c5ec30000, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T15:29:40,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38057-0x101268c5ec30001, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:29:40,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33923-0x101268c5ec30000, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:29:40,777 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7a780d55532c,33923,1731857380422, sessionid=0x101268c5ec30000, setting cluster-up flag (Was=false) 2024-11-17T15:29:40,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33923-0x101268c5ec30000, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:29:40,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38057-0x101268c5ec30001, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:29:40,788 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-17T15:29:40,789 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7a780d55532c,33923,1731857380422 2024-11-17T15:29:40,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33923-0x101268c5ec30000, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:29:40,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38057-0x101268c5ec30001, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:29:40,800 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-17T15:29:40,801 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7a780d55532c,33923,1731857380422 2024-11-17T15:29:40,803 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-17T15:29:40,806 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-17T15:29:40,806 INFO [master/7a780d55532c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-17T15:29:40,806 INFO [master/7a780d55532c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-17T15:29:40,807 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7a780d55532c,33923,1731857380422 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-17T15:29:40,808 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7a780d55532c:0, corePoolSize=5, maxPoolSize=5 2024-11-17T15:29:40,808 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7a780d55532c:0, corePoolSize=5, maxPoolSize=5 2024-11-17T15:29:40,809 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7a780d55532c:0, corePoolSize=5, maxPoolSize=5 2024-11-17T15:29:40,809 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7a780d55532c:0, corePoolSize=5, maxPoolSize=5 2024-11-17T15:29:40,809 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7a780d55532c:0, corePoolSize=10, maxPoolSize=10 2024-11-17T15:29:40,809 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:40,809 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7a780d55532c:0, corePoolSize=2, maxPoolSize=2 2024-11-17T15:29:40,809 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7a780d55532c:0, corePoolSize=1, 
maxPoolSize=1 2024-11-17T15:29:40,815 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T15:29:40,815 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-17T15:29:40,816 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:29:40,817 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-17T15:29:40,820 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731857410820 2024-11-17T15:29:40,821 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-17T15:29:40,821 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-17T15:29:40,821 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-17T15:29:40,821 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-17T15:29:40,821 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-17T15:29:40,821 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-17T15:29:40,827 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:40,831 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-17T15:29:40,831 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-17T15:29:40,832 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-17T15:29:40,832 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-17T15:29:40,832 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-17T15:29:40,836 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.large.0-1731857380833,5,FailOnTimeoutGroup] 2024-11-17T15:29:40,844 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.small.0-1731857380836,5,FailOnTimeoutGroup] 2024-11-17T15:29:40,844 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:40,845 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-17T15:29:40,845 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:40,845 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
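The chore registrations above (LogsCleaner and HFileCleaner every 600000 ms, ReplicationBarrierCleaner every 43200000 ms, SnapshotCleaner every 1800000 ms) all follow the same fixed-period background-task pattern. Below is a minimal sketch of that pattern using the JDK's ScheduledExecutorService; the chore names and periods are copied from the log, while the class name and task bodies are placeholders, not HBase's ChoreService.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

/**
 * Minimal sketch of the "ScheduledChore" pattern seen in the log above:
 * named background tasks run at a fixed period. Not HBase's ChoreService,
 * just a JDK ScheduledExecutorService illustration reusing the logged periods.
 */
public class ChoreSketch {
    public static void main(String[] args) {
        ScheduledExecutorService chores = Executors.newScheduledThreadPool(2);

        // Periods (milliseconds) taken from the ChoreService log entries.
        schedule(chores, "LogsCleaner",               600_000L);
        schedule(chores, "HFileCleaner",              600_000L);
        schedule(chores, "ReplicationBarrierCleaner", 43_200_000L);
        schedule(chores, "SnapshotCleaner",           1_800_000L);
    }

    private static void schedule(ScheduledExecutorService pool, String name, long periodMs) {
        pool.scheduleAtFixedRate(
            () -> System.out.println("running chore: " + name),  // placeholder task body
            periodMs, periodMs, TimeUnit.MILLISECONDS);
    }
}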
2024-11-17T15:29:40,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34067 is added to blk_1073741831_1007 (size=1321) 2024-11-17T15:29:40,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41873 is added to blk_1073741831_1007 (size=1321) 2024-11-17T15:29:40,847 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-17T15:29:40,847 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac 2024-11-17T15:29:40,858 INFO [RS:0;7a780d55532c:38057 {}] regionserver.HRegionServer(746): ClusterId : 2700cc3b-5d9d-4265-b7be-e4e56f487f4a 2024-11-17T15:29:40,858 DEBUG [RS:0;7a780d55532c:38057 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-17T15:29:40,863 DEBUG [RS:0;7a780d55532c:38057 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-17T15:29:40,863 DEBUG [RS:0;7a780d55532c:38057 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-17T15:29:40,867 DEBUG [RS:0;7a780d55532c:38057 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-17T15:29:40,870 DEBUG [RS:0;7a780d55532c:38057 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f9ddbc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7a780d55532c/172.17.0.2:0 2024-11-17T15:29:40,890 DEBUG [RS:0;7a780d55532c:38057 {}] regionserver.ShutdownHook(81): 
Installed shutdown hook thread: Shutdownhook:RS:0;7a780d55532c:38057 2024-11-17T15:29:40,890 INFO [RS:0;7a780d55532c:38057 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-17T15:29:40,890 INFO [RS:0;7a780d55532c:38057 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-17T15:29:40,890 DEBUG [RS:0;7a780d55532c:38057 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-17T15:29:40,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34067 is added to blk_1073741832_1008 (size=32) 2024-11-17T15:29:40,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41873 is added to blk_1073741832_1008 (size=32) 2024-11-17T15:29:40,891 INFO [RS:0;7a780d55532c:38057 {}] regionserver.HRegionServer(2659): reportForDuty to master=7a780d55532c,33923,1731857380422 with port=38057, startcode=1731857380513 2024-11-17T15:29:40,891 DEBUG [RS:0;7a780d55532c:38057 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-17T15:29:40,892 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:29:40,898 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57209, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-17T15:29:40,899 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33923 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7a780d55532c,38057,1731857380513 2024-11-17T15:29:40,899 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33923 {}] master.ServerManager(517): Registering regionserver=7a780d55532c,38057,1731857380513 2024-11-17T15:29:40,901 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T15:29:40,901 DEBUG [RS:0;7a780d55532c:38057 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac 2024-11-17T15:29:40,901 DEBUG [RS:0;7a780d55532c:38057 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42915 2024-11-17T15:29:40,901 DEBUG [RS:0;7a780d55532c:38057 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-17T15:29:40,903 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T15:29:40,903 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:29:40,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33923-0x101268c5ec30000, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T15:29:40,905 DEBUG [RS:0;7a780d55532c:38057 {}] zookeeper.ZKUtil(111): regionserver:38057-0x101268c5ec30001, quorum=127.0.0.1:53267, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7a780d55532c,38057,1731857380513 2024-11-17T15:29:40,905 WARN [RS:0;7a780d55532c:38057 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-17T15:29:40,905 INFO [RS:0;7a780d55532c:38057 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T15:29:40,905 DEBUG [RS:0;7a780d55532c:38057 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513 2024-11-17T15:29:40,909 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:29:40,909 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T15:29:40,911 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T15:29:40,911 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:29:40,912 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:29:40,912 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 
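The CompactionConfiguration entries above repeat the same size-based settings for every column family: ratio 1.2, between 3 and 10 files per compaction, minCompactSize 128 MB. Below is a simplified sketch of the ratio test such policies apply, under the assumption that a selection is acceptable only when no file exceeds ratio times the combined size of the other selected files; it illustrates the rule, it is not the ExploringCompactionPolicy source.

import java.util.List;

/**
 * Simplified sketch of the size-ratio check behind the compaction settings
 * logged above (ratio 1.2, minFilesToCompact=3, maxFilesToCompact=10).
 * Assumption: a candidate selection is acceptable only if no single file is
 * larger than `ratio` times the combined size of the rest of the selection.
 */
public class RatioCheckSketch {
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        if (fileSizes.size() < 2) {
            return true;
        }
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;  // one file dominates the selection
            }
        }
        return true;
    }

    public static void main(String[] args) {
        double ratio = 1.2;  // "ratio 1.200000" from the log
        System.out.println(filesInRatio(List.of(10L, 12L, 11L), ratio));  // true: similar sizes
        System.out.println(filesInRatio(List.of(100L, 12L, 11L), ratio)); // false: 100 > 1.2 * 23
    }
}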
2024-11-17T15:29:40,914 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T15:29:40,914 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:29:40,914 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:29:40,914 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T15:29:40,916 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T15:29:40,916 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:29:40,917 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:29:40,917 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T15:29:40,917 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/hbase/meta/1588230740 2024-11-17T15:29:40,918 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/hbase/meta/1588230740 2024-11-17T15:29:40,920 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T15:29:40,920 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data 
for 1588230740 2024-11-17T15:29:40,920 INFO [RS:0;7a780d55532c:38057 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-17T15:29:40,921 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-17T15:29:40,922 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T15:29:40,924 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7a780d55532c,38057,1731857380513] 2024-11-17T15:29:40,924 INFO [RS:0;7a780d55532c:38057 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-17T15:29:40,928 INFO [RS:0;7a780d55532c:38057 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-17T15:29:40,929 INFO [RS:0;7a780d55532c:38057 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:40,929 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T15:29:40,930 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=779853, jitterRate=-0.008365899324417114}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T15:29:40,931 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731857380892Initializing all the Stores at 1731857380894 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857380894Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857380900 (+6 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857380900Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857380900Cleaning up temporary data from old regions at 1731857380920 (+20 ms)Region opened successfully at 1731857380931 (+11 ms) 2024-11-17T15:29:40,931 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T15:29:40,931 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T15:29:40,931 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T15:29:40,931 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T15:29:40,931 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T15:29:40,938 INFO [RS:0;7a780d55532c:38057 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-17T15:29:40,939 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T15:29:40,939 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731857380931Disabling compacts and flushes for region at 1731857380931Disabling writes for close at 1731857380931Writing region close event to WAL at 1731857380939 (+8 ms)Closed at 1731857380939 2024-11-17T15:29:40,940 INFO [RS:0;7a780d55532c:38057 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-17T15:29:40,940 INFO [RS:0;7a780d55532c:38057 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:40,940 DEBUG [RS:0;7a780d55532c:38057 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:40,940 DEBUG [RS:0;7a780d55532c:38057 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:40,940 DEBUG [RS:0;7a780d55532c:38057 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:40,941 DEBUG [RS:0;7a780d55532c:38057 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:40,941 DEBUG [RS:0;7a780d55532c:38057 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:40,941 DEBUG [RS:0;7a780d55532c:38057 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7a780d55532c:0, corePoolSize=2, maxPoolSize=2 2024-11-17T15:29:40,941 DEBUG [RS:0;7a780d55532c:38057 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:40,941 DEBUG [RS:0;7a780d55532c:38057 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:40,941 DEBUG [RS:0;7a780d55532c:38057 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:40,941 DEBUG 
[RS:0;7a780d55532c:38057 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:40,941 DEBUG [RS:0;7a780d55532c:38057 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:40,941 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T15:29:40,941 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-17T15:29:40,941 DEBUG [RS:0;7a780d55532c:38057 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:40,941 DEBUG [RS:0;7a780d55532c:38057 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7a780d55532c:0, corePoolSize=3, maxPoolSize=3 2024-11-17T15:29:40,941 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-17T15:29:40,941 DEBUG [RS:0;7a780d55532c:38057 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0, corePoolSize=3, maxPoolSize=3 2024-11-17T15:29:40,943 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T15:29:40,945 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-17T15:29:40,948 INFO [RS:0;7a780d55532c:38057 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:40,948 INFO [RS:0;7a780d55532c:38057 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:40,949 INFO [RS:0;7a780d55532c:38057 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:40,949 INFO [RS:0;7a780d55532c:38057 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:40,949 INFO [RS:0;7a780d55532c:38057 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:40,949 INFO [RS:0;7a780d55532c:38057 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,38057,1731857380513-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T15:29:40,970 INFO [RS:0;7a780d55532c:38057 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-17T15:29:40,970 INFO [RS:0;7a780d55532c:38057 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,38057,1731857380513-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
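Two of the numbers logged for hbase:meta above can be reproduced from the surrounding values: the FlushLargeStoresPolicy fallback of 16.0 M (flushSizeLowerBound=16777216) is consistent with a 64 MB region memstore flush size divided by the four column families, and desiredMaxFileSize=779853 is consistent with a 786432-byte base scaled by (1 + jitterRate). The sketch below restates that arithmetic; the 64 MB flush size and the 786432-byte base are inferred from the logged output, not read from the test configuration.

/**
 * Sketch of two relationships visible in the log above. Assumptions:
 *  - FlushLargeStoresPolicy lower bound = memstore flush size / number of families
 *    when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set.
 *  - desiredMaxFileSize = configured max file size * (1 + jitterRate); the
 *    786432-byte base below is inferred from the logged values, not read from config.
 */
public class FlushAndSplitMath {
    public static void main(String[] args) {
        // hbase:meta has 4 families (info, ns, rep_barrier, table).
        long memstoreFlushSize = 64L * 1024 * 1024;       // inferred 64 MB flush size
        int families = 4;
        long flushSizeLowerBound = memstoreFlushSize / families;
        System.out.println(flushSizeLowerBound);          // 16777216, as logged

        // Split-policy jitter, reproducing desiredMaxFileSize=779853 from the log.
        long configuredMaxFileSize = 786_432L;            // inferred base value
        double jitterRate = -0.008365899324417114;        // from the log
        long desiredMaxFileSize = (long) (configuredMaxFileSize * (1 + jitterRate));
        System.out.println(desiredMaxFileSize);           // 779853
    }
}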
2024-11-17T15:29:40,971 INFO [RS:0;7a780d55532c:38057 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:40,971 INFO [RS:0;7a780d55532c:38057 {}] regionserver.Replication(171): 7a780d55532c,38057,1731857380513 started 2024-11-17T15:29:40,986 INFO [RS:0;7a780d55532c:38057 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:40,986 INFO [RS:0;7a780d55532c:38057 {}] regionserver.HRegionServer(1482): Serving as 7a780d55532c,38057,1731857380513, RpcServer on 7a780d55532c/172.17.0.2:38057, sessionid=0x101268c5ec30001 2024-11-17T15:29:40,986 DEBUG [RS:0;7a780d55532c:38057 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-17T15:29:40,986 DEBUG [RS:0;7a780d55532c:38057 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7a780d55532c,38057,1731857380513 2024-11-17T15:29:40,986 DEBUG [RS:0;7a780d55532c:38057 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a780d55532c,38057,1731857380513' 2024-11-17T15:29:40,986 DEBUG [RS:0;7a780d55532c:38057 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-17T15:29:40,987 DEBUG [RS:0;7a780d55532c:38057 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-17T15:29:40,987 DEBUG [RS:0;7a780d55532c:38057 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-17T15:29:40,987 DEBUG [RS:0;7a780d55532c:38057 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-17T15:29:40,987 DEBUG [RS:0;7a780d55532c:38057 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7a780d55532c,38057,1731857380513 2024-11-17T15:29:40,987 DEBUG [RS:0;7a780d55532c:38057 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a780d55532c,38057,1731857380513' 2024-11-17T15:29:40,987 DEBUG [RS:0;7a780d55532c:38057 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-17T15:29:40,988 DEBUG [RS:0;7a780d55532c:38057 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-17T15:29:40,988 DEBUG [RS:0;7a780d55532c:38057 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-17T15:29:40,988 INFO [RS:0;7a780d55532c:38057 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-17T15:29:40,988 INFO [RS:0;7a780d55532c:38057 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
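The flush-table-proc and online-snapshot managers above both start the same ZooKeeper-backed procedure-member protocol: check an abort znode for failed procedures, then watch an acquired znode for newly started ones. The sketch below shows that pattern with the plain ZooKeeper client; the quorum address and znode paths come from the log, while the session timeout and event handling are arbitrary illustrative choices, not HBase's ZKProcedureMemberRpcs.

import java.util.List;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;

/**
 * Minimal sketch of the procedure-member pattern in the log above:
 * look for aborted procedures under the "abort" znode and watch the
 * "acquired" znode for newly started procedures. Illustrative only.
 */
public class ProcedureMemberSketch {
    public static void main(String[] args) throws Exception {
        // Quorum address taken from the log; the 30s session timeout is an arbitrary choice.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:53267", 30_000,
            (WatchedEvent event) -> System.out.println("event: " + event));

        checkForProcedures(zk, "/hbase/flush-table-proc");
        checkForProcedures(zk, "/hbase/online-snapshot");
    }

    static void checkForProcedures(ZooKeeper zk, String baseZNode)
            throws KeeperException, InterruptedException {
        // Any child of the abort node means a procedure was aborted.
        List<String> aborted = zk.getChildren(baseZNode + "/abort", false);
        System.out.println("aborted procedures: " + aborted);

        // Watch the acquired node so the default watcher is notified of new procedures.
        List<String> acquired = zk.getChildren(baseZNode + "/acquired", true);
        System.out.println("acquired procedures: " + acquired);
    }
}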
2024-11-17T15:29:41,091 INFO [RS:0;7a780d55532c:38057 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a780d55532c%2C38057%2C1731857380513, suffix=, logDir=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513, archiveDir=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/oldWALs, maxLogs=32 2024-11-17T15:29:41,092 INFO [RS:0;7a780d55532c:38057 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C38057%2C1731857380513.1731857381092 2024-11-17T15:29:41,095 WARN [7a780d55532c:33923 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-17T15:29:41,102 INFO [RS:0;7a780d55532c:38057 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857381092 2024-11-17T15:29:41,135 DEBUG [RS:0;7a780d55532c:38057 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37089:37089),(127.0.0.1/127.0.0.1:33713:33713)] 2024-11-17T15:29:41,345 DEBUG [7a780d55532c:33923 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-17T15:29:41,346 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7a780d55532c,38057,1731857380513 2024-11-17T15:29:41,347 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7a780d55532c,38057,1731857380513, state=OPENING 2024-11-17T15:29:41,350 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-17T15:29:41,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33923-0x101268c5ec30000, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:29:41,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38057-0x101268c5ec30001, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:29:41,357 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T15:29:41,358 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7a780d55532c,38057,1731857380513}] 2024-11-17T15:29:41,358 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T15:29:41,359 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T15:29:41,379 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T15:29:41,380 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the 
MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-17T15:29:41,380 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-17T15:29:41,512 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-17T15:29:41,514 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35749, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-17T15:29:41,520 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:29:41,520 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-17T15:29:41,520 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T15:29:41,523 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a780d55532c%2C38057%2C1731857380513.meta, suffix=.meta, logDir=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513, archiveDir=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/oldWALs, maxLogs=32 2024-11-17T15:29:41,524 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta 2024-11-17T15:29:41,524 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:29:41,541 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta 2024-11-17T15:29:41,554 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33713:33713),(127.0.0.1/127.0.0.1:37089:37089)] 2024-11-17T15:29:41,563 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-17T15:29:41,563 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-17T15:29:41,563 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-17T15:29:41,564 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-17T15:29:41,564 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-17T15:29:41,564 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:29:41,564 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-17T15:29:41,564 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-17T15:29:41,567 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T15:29:41,568 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName 
info 2024-11-17T15:29:41,568 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:29:41,569 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:29:41,569 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T15:29:41,571 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T15:29:41,571 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:29:41,571 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:29:41,572 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T15:29:41,573 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T15:29:41,573 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:29:41,573 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:29:41,573 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T15:29:41,574 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T15:29:41,574 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:29:41,575 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:29:41,575 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T15:29:41,576 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/hbase/meta/1588230740 2024-11-17T15:29:41,578 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/hbase/meta/1588230740 2024-11-17T15:29:41,579 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T15:29:41,579 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T15:29:41,580 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
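The WAL configuration above (blocksize=256 MB, rollsize=128 MB, maxLogs=32) is consistent with rolling the active WAL at half the block size and archiving once more than maxLogs files accumulate. The sketch below restates those thresholds; the 0.5 multiplier and the roll/archive triggers are assumptions read off the logged numbers, not the FSHLog implementation.

/**
 * Sketch of the WAL rolling thresholds logged above:
 * blocksize=256 MB, rollsize=128 MB (assumed blocksize * 0.5), maxLogs=32.
 * Illustrative only; not the FSHLog implementation.
 */
public class WalRollSketch {
    static final long BLOCK_SIZE = 256L * 1024 * 1024;
    static final double ROLL_MULTIPLIER = 0.5;             // assumed: rollsize / blocksize
    static final long ROLL_SIZE = (long) (BLOCK_SIZE * ROLL_MULTIPLIER);
    static final int MAX_LOGS = 32;

    /** Request a roll once the current WAL file reaches the roll size. */
    static boolean shouldRoll(long currentWalSizeBytes) {
        return currentWalSizeBytes >= ROLL_SIZE;
    }

    /** Force-archive old WALs once too many have piled up. */
    static boolean tooManyWals(int walFileCount) {
        return walFileCount > MAX_LOGS;
    }

    public static void main(String[] args) {
        System.out.println(ROLL_SIZE);                       // 134217728 (128 MB), as logged
        System.out.println(shouldRoll(200L * 1024 * 1024));  // true
        System.out.println(tooManyWals(10));                 // false
    }
}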
2024-11-17T15:29:41,582 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T15:29:41,584 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=798604, jitterRate=0.015478283166885376}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T15:29:41,584 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-17T15:29:41,585 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731857381564Writing region info on filesystem at 1731857381564Initializing all the Stores at 1731857381566 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857381566Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857381566Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857381567 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857381567Cleaning up temporary data from old regions at 1731857381580 (+13 ms)Running coprocessor post-open hooks at 1731857381584 (+4 ms)Region opened successfully at 1731857381585 (+1 ms) 2024-11-17T15:29:41,586 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731857381511 2024-11-17T15:29:41,590 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-17T15:29:41,590 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-17T15:29:41,591 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=7a780d55532c,38057,1731857380513 2024-11-17T15:29:41,593 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7a780d55532c,38057,1731857380513, state=OPEN 2024-11-17T15:29:41,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38057-0x101268c5ec30001, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T15:29:41,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33923-0x101268c5ec30000, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T15:29:41,600 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T15:29:41,600 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T15:29:41,600 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7a780d55532c,38057,1731857380513 2024-11-17T15:29:41,605 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-17T15:29:41,605 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7a780d55532c,38057,1731857380513 in 242 msec 2024-11-17T15:29:41,609 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-17T15:29:41,610 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 664 msec 2024-11-17T15:29:41,611 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T15:29:41,611 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-17T15:29:41,613 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T15:29:41,613 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7a780d55532c,38057,1731857380513, seqNum=-1] 2024-11-17T15:29:41,614 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T15:29:41,616 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33645, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T15:29:41,626 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 819 msec 2024-11-17T15:29:41,626 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731857381626, completionTime=-1 2024-11-17T15:29:41,626 INFO 
[master/7a780d55532c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-17T15:29:41,626 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-17T15:29:41,629 INFO [master/7a780d55532c:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-17T15:29:41,629 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731857441629 2024-11-17T15:29:41,629 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731857501629 2024-11-17T15:29:41,630 INFO [master/7a780d55532c:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 3 msec 2024-11-17T15:29:41,630 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,33923,1731857380422-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:41,630 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,33923,1731857380422-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:41,630 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,33923,1731857380422-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:41,630 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7a780d55532c:33923, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:41,630 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:41,630 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:41,633 DEBUG [master/7a780d55532c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-17T15:29:41,636 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.052sec 2024-11-17T15:29:41,636 INFO [master/7a780d55532c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-17T15:29:41,636 INFO [master/7a780d55532c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-17T15:29:41,636 INFO [master/7a780d55532c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-17T15:29:41,636 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-17T15:29:41,636 INFO [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-17T15:29:41,636 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,33923,1731857380422-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T15:29:41,637 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,33923,1731857380422-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-17T15:29:41,643 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-17T15:29:41,643 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-17T15:29:41,643 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,33923,1731857380422-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:41,658 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72aab0a4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T15:29:41,658 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7a780d55532c,33923,-1 for getting cluster id 2024-11-17T15:29:41,659 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-17T15:29:41,661 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '2700cc3b-5d9d-4265-b7be-e4e56f487f4a' 2024-11-17T15:29:41,661 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-17T15:29:41,662 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "2700cc3b-5d9d-4265-b7be-e4e56f487f4a" 2024-11-17T15:29:41,662 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ac0e8a9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T15:29:41,662 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7a780d55532c,33923,-1] 2024-11-17T15:29:41,662 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-17T15:29:41,663 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:29:41,664 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59130, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-17T15:29:41,665 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62489ce6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T15:29:41,666 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T15:29:41,667 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7a780d55532c,38057,1731857380513, seqNum=-1] 2024-11-17T15:29:41,667 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T15:29:41,669 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41248, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T15:29:41,670 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7a780d55532c,33923,1731857380422 2024-11-17T15:29:41,671 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:29:41,675 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-17T15:29:41,691 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7a780d55532c:0 server-side Connection retries=45 2024-11-17T15:29:41,691 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T15:29:41,691 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T15:29:41,691 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T15:29:41,691 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T15:29:41,691 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T15:29:41,691 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-17T15:29:41,691 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T15:29:41,692 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35049 2024-11-17T15:29:41,693 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35049 connecting to ZooKeeper ensemble=127.0.0.1:53267 2024-11-17T15:29:41,694 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:29:41,695 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:29:41,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:350490x0, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T15:29:41,700 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35049-0x101268c5ec30002 connected 2024-11-17T15:29:41,700 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:35049-0x101268c5ec30002, quorum=127.0.0.1:53267, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-17T15:29:41,700 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-17T15:29:41,701 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-17T15:29:41,705 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-17T15:29:41,706 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:35049-0x101268c5ec30002, quorum=127.0.0.1:53267, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-17T15:29:41,707 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35049-0x101268c5ec30002, quorum=127.0.0.1:53267, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T15:29:41,708 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35049 2024-11-17T15:29:41,708 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35049 2024-11-17T15:29:41,712 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35049 2024-11-17T15:29:41,715 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35049 2024-11-17T15:29:41,715 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35049 2024-11-17T15:29:41,717 INFO [RS:1;7a780d55532c:35049 {}] regionserver.HRegionServer(746): ClusterId : 2700cc3b-5d9d-4265-b7be-e4e56f487f4a 2024-11-17T15:29:41,717 DEBUG [RS:1;7a780d55532c:35049 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-17T15:29:41,720 DEBUG [RS:1;7a780d55532c:35049 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-17T15:29:41,720 DEBUG [RS:1;7a780d55532c:35049 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-17T15:29:41,723 DEBUG [RS:1;7a780d55532c:35049 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-17T15:29:41,724 DEBUG [RS:1;7a780d55532c:35049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a19767a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7a780d55532c/172.17.0.2:0 2024-11-17T15:29:41,744 DEBUG [RS:1;7a780d55532c:35049 {}] regionserver.ShutdownHook(81): Installed 
shutdown hook thread: Shutdownhook:RS:1;7a780d55532c:35049 2024-11-17T15:29:41,744 INFO [RS:1;7a780d55532c:35049 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-17T15:29:41,744 INFO [RS:1;7a780d55532c:35049 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-17T15:29:41,744 DEBUG [RS:1;7a780d55532c:35049 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-17T15:29:41,745 INFO [RS:1;7a780d55532c:35049 {}] regionserver.HRegionServer(2659): reportForDuty to master=7a780d55532c,33923,1731857380422 with port=35049, startcode=1731857381690 2024-11-17T15:29:41,745 DEBUG [RS:1;7a780d55532c:35049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-17T15:29:41,748 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33133, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-17T15:29:41,749 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33923 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7a780d55532c,35049,1731857381690 2024-11-17T15:29:41,749 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33923 {}] master.ServerManager(517): Registering regionserver=7a780d55532c,35049,1731857381690 2024-11-17T15:29:41,751 DEBUG [RS:1;7a780d55532c:35049 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac 2024-11-17T15:29:41,751 DEBUG [RS:1;7a780d55532c:35049 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42915 2024-11-17T15:29:41,751 DEBUG [RS:1;7a780d55532c:35049 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-17T15:29:41,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33923-0x101268c5ec30000, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T15:29:41,757 DEBUG [RS:1;7a780d55532c:35049 {}] zookeeper.ZKUtil(111): regionserver:35049-0x101268c5ec30002, quorum=127.0.0.1:53267, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7a780d55532c,35049,1731857381690 2024-11-17T15:29:41,757 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7a780d55532c,35049,1731857381690] 2024-11-17T15:29:41,757 WARN [RS:1;7a780d55532c:35049 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-17T15:29:41,757 INFO [RS:1;7a780d55532c:35049 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T15:29:41,757 DEBUG [RS:1;7a780d55532c:35049 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690 2024-11-17T15:29:41,765 INFO [RS:1;7a780d55532c:35049 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-17T15:29:41,768 INFO [RS:1;7a780d55532c:35049 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-17T15:29:41,771 INFO [RS:1;7a780d55532c:35049 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-17T15:29:41,771 INFO [RS:1;7a780d55532c:35049 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:41,771 INFO [RS:1;7a780d55532c:35049 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-17T15:29:41,773 INFO [RS:1;7a780d55532c:35049 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-17T15:29:41,773 INFO [RS:1;7a780d55532c:35049 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:41,774 DEBUG [RS:1;7a780d55532c:35049 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:41,774 DEBUG [RS:1;7a780d55532c:35049 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:41,774 DEBUG [RS:1;7a780d55532c:35049 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:41,774 DEBUG [RS:1;7a780d55532c:35049 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:41,774 DEBUG [RS:1;7a780d55532c:35049 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:41,775 DEBUG [RS:1;7a780d55532c:35049 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7a780d55532c:0, corePoolSize=2, maxPoolSize=2 2024-11-17T15:29:41,775 DEBUG [RS:1;7a780d55532c:35049 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:41,775 DEBUG [RS:1;7a780d55532c:35049 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:41,775 DEBUG [RS:1;7a780d55532c:35049 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:41,775 DEBUG [RS:1;7a780d55532c:35049 {}] executor.ExecutorService(95): Starting executor service 
name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:41,775 DEBUG [RS:1;7a780d55532c:35049 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:41,775 DEBUG [RS:1;7a780d55532c:35049 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:29:41,775 DEBUG [RS:1;7a780d55532c:35049 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7a780d55532c:0, corePoolSize=3, maxPoolSize=3 2024-11-17T15:29:41,775 DEBUG [RS:1;7a780d55532c:35049 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0, corePoolSize=3, maxPoolSize=3 2024-11-17T15:29:41,780 INFO [RS:1;7a780d55532c:35049 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:41,780 INFO [RS:1;7a780d55532c:35049 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:41,781 INFO [RS:1;7a780d55532c:35049 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:41,781 INFO [RS:1;7a780d55532c:35049 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:41,781 INFO [RS:1;7a780d55532c:35049 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:41,781 INFO [RS:1;7a780d55532c:35049 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,35049,1731857381690-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T15:29:41,801 INFO [RS:1;7a780d55532c:35049 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-17T15:29:41,801 INFO [RS:1;7a780d55532c:35049 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,35049,1731857381690-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:41,801 INFO [RS:1;7a780d55532c:35049 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:29:41,801 INFO [RS:1;7a780d55532c:35049 {}] regionserver.Replication(171): 7a780d55532c,35049,1731857381690 started 2024-11-17T15:29:41,819 INFO [RS:1;7a780d55532c:35049 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-17T15:29:41,819 INFO [RS:1;7a780d55532c:35049 {}] regionserver.HRegionServer(1482): Serving as 7a780d55532c,35049,1731857381690, RpcServer on 7a780d55532c/172.17.0.2:35049, sessionid=0x101268c5ec30002 2024-11-17T15:29:41,819 DEBUG [RS:1;7a780d55532c:35049 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-17T15:29:41,819 DEBUG [RS:1;7a780d55532c:35049 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7a780d55532c,35049,1731857381690 2024-11-17T15:29:41,819 DEBUG [RS:1;7a780d55532c:35049 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a780d55532c,35049,1731857381690' 2024-11-17T15:29:41,819 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;7a780d55532c:35049,5,FailOnTimeoutGroup] 2024-11-17T15:29:41,819 DEBUG [RS:1;7a780d55532c:35049 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-17T15:29:41,820 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-17T15:29:41,820 DEBUG [RS:1;7a780d55532c:35049 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-17T15:29:41,820 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-17T15:29:41,820 DEBUG [RS:1;7a780d55532c:35049 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-17T15:29:41,820 DEBUG [RS:1;7a780d55532c:35049 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-17T15:29:41,820 DEBUG [RS:1;7a780d55532c:35049 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7a780d55532c,35049,1731857381690 2024-11-17T15:29:41,820 DEBUG [RS:1;7a780d55532c:35049 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a780d55532c,35049,1731857381690' 2024-11-17T15:29:41,820 DEBUG [RS:1;7a780d55532c:35049 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-17T15:29:41,821 DEBUG [RS:1;7a780d55532c:35049 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-17T15:29:41,821 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 7a780d55532c,33923,1731857380422 2024-11-17T15:29:41,822 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@21687f1c 2024-11-17T15:29:41,822 DEBUG [RS:1;7a780d55532c:35049 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-17T15:29:41,822 INFO [RS:1;7a780d55532c:35049 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-17T15:29:41,822 INFO [RS:1;7a780d55532c:35049 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-17T15:29:41,822 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-17T15:29:41,824 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59132, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-17T15:29:41,824 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33923 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-17T15:29:41,824 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33923 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-17T15:29:41,825 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33923 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T15:29:41,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33923 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-17T15:29:41,828 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-17T15:29:41,828 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:29:41,829 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33923 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-17T15:29:41,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33923 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-17T15:29:41,830 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-17T15:29:41,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34067 is added to blk_1073741835_1011 (size=393) 2024-11-17T15:29:41,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41873 is added to blk_1073741835_1011 (size=393) 2024-11-17T15:29:41,844 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b38f27f49ebe8200b4255f09e391ea9b, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b.', STARTKEY => '', ENDKEY => ''}, 
tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac 2024-11-17T15:29:41,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41873 is added to blk_1073741836_1012 (size=76) 2024-11-17T15:29:41,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34067 is added to blk_1073741836_1012 (size=76) 2024-11-17T15:29:41,852 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:29:41,852 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing b38f27f49ebe8200b4255f09e391ea9b, disabling compactions & flushes 2024-11-17T15:29:41,852 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b. 2024-11-17T15:29:41,853 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b. 2024-11-17T15:29:41,853 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b. after waiting 0 ms 2024-11-17T15:29:41,853 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b. 2024-11-17T15:29:41,853 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b. 
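
The create request logged above ('TestLogRolling-testLogRollOnDatanodeDeath' with a single 'info' family, VERSIONS => '1', BLOOMFILTER => 'ROW') is what an ordinary Admin.createTable call produces. A minimal client-side sketch that would issue an equivalent request, assuming connection settings come from the test's hbase-site.xml on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Mirrors the logged descriptor: one 'info' family, max 1 version, ROW bloom filter.
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)                 // VERSIONS => '1'
              .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
              .build())
          .build());
    }
  }
}
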
2024-11-17T15:29:41,853 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for b38f27f49ebe8200b4255f09e391ea9b: Waiting for close lock at 1731857381852Disabling compacts and flushes for region at 1731857381852Disabling writes for close at 1731857381853 (+1 ms)Writing region close event to WAL at 1731857381853Closed at 1731857381853 2024-11-17T15:29:41,854 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-17T15:29:41,855 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731857381855"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731857381855"}]},"ts":"1731857381855"} 2024-11-17T15:29:41,857 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-17T15:29:41,858 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-17T15:29:41,859 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731857381859"}]},"ts":"1731857381859"} 2024-11-17T15:29:41,861 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-17T15:29:41,861 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=b38f27f49ebe8200b4255f09e391ea9b, ASSIGN}] 2024-11-17T15:29:41,863 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=b38f27f49ebe8200b4255f09e391ea9b, ASSIGN 2024-11-17T15:29:41,865 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=b38f27f49ebe8200b4255f09e391ea9b, ASSIGN; state=OFFLINE, location=7a780d55532c,38057,1731857380513; forceNewPlan=false, retain=false 2024-11-17T15:29:41,924 INFO [RS:1;7a780d55532c:35049 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a780d55532c%2C35049%2C1731857381690, suffix=, logDir=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690, archiveDir=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/oldWALs, maxLogs=32 2024-11-17T15:29:41,925 INFO [RS:1;7a780d55532c:35049 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C35049%2C1731857381690.1731857381925 2024-11-17T15:29:41,932 INFO [RS:1;7a780d55532c:35049 {}] 
wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 2024-11-17T15:29:41,936 DEBUG [RS:1;7a780d55532c:35049 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33713:33713),(127.0.0.1/127.0.0.1:37089:37089)] 2024-11-17T15:29:42,017 INFO [7a780d55532c:33923 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-17T15:29:42,017 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b38f27f49ebe8200b4255f09e391ea9b, regionState=OPENING, regionLocation=7a780d55532c,38057,1731857380513 2024-11-17T15:29:42,021 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=b38f27f49ebe8200b4255f09e391ea9b, ASSIGN because future has completed 2024-11-17T15:29:42,022 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b38f27f49ebe8200b4255f09e391ea9b, server=7a780d55532c,38057,1731857380513}] 2024-11-17T15:29:42,046 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-17T15:29:42,052 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:29:42,076 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:29:42,078 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:29:42,079 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:29:42,181 INFO [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b. 
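
The AbstractFSWAL line above reports the effective WAL-rolling parameters for the newly started region server (blocksize=256 MB, rollsize=128 MB, maxLogs=32). A sketch of the standard configuration keys that are believed to drive those numbers — the values mirror the log, and nothing here implies the test sets them explicitly rather than inheriting defaults:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Values chosen to mirror the logged "blocksize=256 MB, rollsize=128 MB, maxLogs=32".
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f); // rollsize = blocksize * multiplier
    conf.setInt("hbase.regionserver.maxlogs", 32);
    long rollSize = (long) (conf.getLong("hbase.regionserver.hlog.blocksize", 0)
        * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f));
    System.out.println("rollsize=" + rollSize); // 134217728 (128 MB)
  }
}
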
2024-11-17T15:29:42,181 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => b38f27f49ebe8200b4255f09e391ea9b, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b.', STARTKEY => '', ENDKEY => ''} 2024-11-17T15:29:42,181 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath b38f27f49ebe8200b4255f09e391ea9b 2024-11-17T15:29:42,181 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:29:42,182 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for b38f27f49ebe8200b4255f09e391ea9b 2024-11-17T15:29:42,182 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for b38f27f49ebe8200b4255f09e391ea9b 2024-11-17T15:29:42,183 INFO [StoreOpener-b38f27f49ebe8200b4255f09e391ea9b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region b38f27f49ebe8200b4255f09e391ea9b 2024-11-17T15:29:42,185 INFO [StoreOpener-b38f27f49ebe8200b4255f09e391ea9b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b38f27f49ebe8200b4255f09e391ea9b columnFamilyName info 2024-11-17T15:29:42,185 DEBUG [StoreOpener-b38f27f49ebe8200b4255f09e391ea9b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:29:42,185 INFO [StoreOpener-b38f27f49ebe8200b4255f09e391ea9b-1 {}] regionserver.HStore(327): Store=b38f27f49ebe8200b4255f09e391ea9b/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T15:29:42,185 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for b38f27f49ebe8200b4255f09e391ea9b 2024-11-17T15:29:42,187 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b 2024-11-17T15:29:42,188 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b 2024-11-17T15:29:42,189 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for b38f27f49ebe8200b4255f09e391ea9b 2024-11-17T15:29:42,189 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for b38f27f49ebe8200b4255f09e391ea9b 2024-11-17T15:29:42,191 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for b38f27f49ebe8200b4255f09e391ea9b 2024-11-17T15:29:42,193 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T15:29:42,193 INFO [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened b38f27f49ebe8200b4255f09e391ea9b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=739192, jitterRate=-0.060069769620895386}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-17T15:29:42,194 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b38f27f49ebe8200b4255f09e391ea9b 2024-11-17T15:29:42,194 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for b38f27f49ebe8200b4255f09e391ea9b: Running coprocessor pre-open hook at 1731857382182Writing region info on filesystem at 1731857382182Initializing all the Stores at 1731857382183 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857382183Cleaning up temporary data from old regions at 1731857382189 (+6 ms)Running coprocessor post-open hooks at 1731857382194 (+5 ms)Region opened successfully at 1731857382194 2024-11-17T15:29:42,195 INFO [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b., pid=6, masterSystemTime=1731857382176 2024-11-17T15:29:42,198 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b. 2024-11-17T15:29:42,198 INFO [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b. 2024-11-17T15:29:42,199 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b38f27f49ebe8200b4255f09e391ea9b, regionState=OPEN, openSeqNum=2, regionLocation=7a780d55532c,38057,1731857380513 2024-11-17T15:29:42,201 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b38f27f49ebe8200b4255f09e391ea9b, server=7a780d55532c,38057,1731857380513 because future has completed 2024-11-17T15:29:42,205 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-17T15:29:42,205 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure b38f27f49ebe8200b4255f09e391ea9b, server=7a780d55532c,38057,1731857380513 in 181 msec 2024-11-17T15:29:42,208 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-17T15:29:42,209 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=b38f27f49ebe8200b4255f09e391ea9b, ASSIGN in 344 msec 2024-11-17T15:29:42,210 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-17T15:29:42,210 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731857382210"}]},"ts":"1731857382210"} 2024-11-17T15:29:42,213 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-17T15:29:42,214 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-17T15:29:42,216 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 389 msec 2024-11-17T15:29:47,066 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-17T15:29:47,068 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:29:47,083 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:29:47,084 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:29:47,085 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:29:47,091 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-17T15:29:47,092 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-17T15:29:51,379 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-17T15:29:51,379 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-17T15:29:51,380 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-17T15:29:51,380 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-17T15:29:51,381 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T15:29:51,381 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-17T15:29:51,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33923 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-17T15:29:51,916 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-17T15:29:51,916 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-17T15:29:51,919 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-17T15:29:51,919 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b. 2024-11-17T15:29:51,932 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:29:51,935 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T15:29:51,935 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T15:29:51,936 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T15:29:51,936 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T15:29:51,936 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@122548d1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/hadoop.log.dir/,AVAILABLE} 2024-11-17T15:29:51,937 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a5ed106{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T15:29:52,059 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@47e3d6b8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/java.io.tmpdir/jetty-localhost-45741-hadoop-hdfs-3_4_1-tests_jar-_-any-15938261486010751433/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:29:52,060 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6825a050{HTTP/1.1, (http/1.1)}{localhost:45741} 2024-11-17T15:29:52,060 INFO [Time-limited test {}] server.Server(415): Started @115982ms 2024-11-17T15:29:52,061 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T15:29:52,093 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:29:52,097 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T15:29:52,098 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T15:29:52,098 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T15:29:52,098 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T15:29:52,098 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@9cb12d7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/hadoop.log.dir/,AVAILABLE} 2024-11-17T15:29:52,099 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a86ef00{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T15:29:52,161 WARN [Thread-828 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data5/current/BP-398811610-172.17.0.2-1731857379530/current, will proceed with Du for space computation calculation, 2024-11-17T15:29:52,161 WARN [Thread-829 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data6/current/BP-398811610-172.17.0.2-1731857379530/current, will proceed with Du for space computation calculation, 2024-11-17T15:29:52,179 WARN [Thread-808 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T15:29:52,181 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x34c31a5eb5b1b86d with lease ID 0x6bf99bb86d5c7787: Processing first storage report for DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f from datanode DatanodeRegistration(127.0.0.1:36583, datanodeUuid=ee2b034e-b9dd-4629-b935-c813f78579f6, infoPort=45059, infoSecurePort=0, ipcPort=43109, storageInfo=lv=-57;cid=testClusterID;nsid=378286705;c=1731857379530) 2024-11-17T15:29:52,181 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x34c31a5eb5b1b86d with lease ID 0x6bf99bb86d5c7787: from storage DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f node DatanodeRegistration(127.0.0.1:36583, datanodeUuid=ee2b034e-b9dd-4629-b935-c813f78579f6, infoPort=45059, infoSecurePort=0, ipcPort=43109, storageInfo=lv=-57;cid=testClusterID;nsid=378286705;c=1731857379530), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:29:52,181 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x34c31a5eb5b1b86d with lease ID 0x6bf99bb86d5c7787: Processing first storage report for DS-f3b959f3-598c-4394-a67c-96d8b7328f0e from datanode DatanodeRegistration(127.0.0.1:36583, datanodeUuid=ee2b034e-b9dd-4629-b935-c813f78579f6, infoPort=45059, infoSecurePort=0, ipcPort=43109, storageInfo=lv=-57;cid=testClusterID;nsid=378286705;c=1731857379530) 2024-11-17T15:29:52,181 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x34c31a5eb5b1b86d with lease ID 0x6bf99bb86d5c7787: from storage DS-f3b959f3-598c-4394-a67c-96d8b7328f0e node DatanodeRegistration(127.0.0.1:36583, datanodeUuid=ee2b034e-b9dd-4629-b935-c813f78579f6, infoPort=45059, infoSecurePort=0, ipcPort=43109, storageInfo=lv=-57;cid=testClusterID;nsid=378286705;c=1731857379530), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:29:52,218 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3301cee2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/java.io.tmpdir/jetty-localhost-35587-hadoop-hdfs-3_4_1-tests_jar-_-any-14660392560560609595/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:29:52,219 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4e6d719c{HTTP/1.1, (http/1.1)}{localhost:35587} 2024-11-17T15:29:52,219 INFO [Time-limited test {}] server.Server(415): Started @116141ms 2024-11-17T15:29:52,221 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T15:29:52,257 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:29:52,260 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T15:29:52,261 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T15:29:52,261 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T15:29:52,261 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T15:29:52,262 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3916c602{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/hadoop.log.dir/,AVAILABLE} 2024-11-17T15:29:52,262 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@fd9d9ba{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T15:29:52,320 WARN [Thread-863 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data7/current/BP-398811610-172.17.0.2-1731857379530/current, will proceed with Du for space computation calculation, 2024-11-17T15:29:52,321 WARN [Thread-864 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data8/current/BP-398811610-172.17.0.2-1731857379530/current, will proceed with Du for space computation calculation, 2024-11-17T15:29:52,344 WARN [Thread-843 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T15:29:52,346 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5c3d5c72d3e81b2c with lease ID 0x6bf99bb86d5c7788: Processing first storage report for DS-9aafc437-98aa-4702-8637-e862e802d273 from datanode DatanodeRegistration(127.0.0.1:37341, datanodeUuid=42b6b50f-ae35-42e2-bf0f-012e79f43601, infoPort=34697, infoSecurePort=0, ipcPort=35119, storageInfo=lv=-57;cid=testClusterID;nsid=378286705;c=1731857379530) 2024-11-17T15:29:52,346 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5c3d5c72d3e81b2c with lease ID 0x6bf99bb86d5c7788: from storage DS-9aafc437-98aa-4702-8637-e862e802d273 node DatanodeRegistration(127.0.0.1:37341, datanodeUuid=42b6b50f-ae35-42e2-bf0f-012e79f43601, infoPort=34697, infoSecurePort=0, ipcPort=35119, storageInfo=lv=-57;cid=testClusterID;nsid=378286705;c=1731857379530), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:29:52,346 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5c3d5c72d3e81b2c with lease ID 0x6bf99bb86d5c7788: Processing first storage report for DS-03c4141a-a3c1-4426-a145-40b818a1585c from datanode DatanodeRegistration(127.0.0.1:37341, datanodeUuid=42b6b50f-ae35-42e2-bf0f-012e79f43601, infoPort=34697, infoSecurePort=0, ipcPort=35119, storageInfo=lv=-57;cid=testClusterID;nsid=378286705;c=1731857379530) 2024-11-17T15:29:52,346 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5c3d5c72d3e81b2c with lease ID 0x6bf99bb86d5c7788: from storage DS-03c4141a-a3c1-4426-a145-40b818a1585c node DatanodeRegistration(127.0.0.1:37341, datanodeUuid=42b6b50f-ae35-42e2-bf0f-012e79f43601, infoPort=34697, infoSecurePort=0, ipcPort=35119, storageInfo=lv=-57;cid=testClusterID;nsid=378286705;c=1731857379530), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:29:52,380 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@479380cb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/java.io.tmpdir/jetty-localhost-40793-hadoop-hdfs-3_4_1-tests_jar-_-any-18249076297920935044/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:29:52,380 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5e61007e{HTTP/1.1, (http/1.1)}{localhost:40793} 2024-11-17T15:29:52,380 INFO [Time-limited test {}] server.Server(415): Started @116302ms 2024-11-17T15:29:52,382 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-17T15:29:52,484 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data9/current/BP-398811610-172.17.0.2-1731857379530/current, will proceed with Du for space computation calculation, 2024-11-17T15:29:52,484 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data10/current/BP-398811610-172.17.0.2-1731857379530/current, will proceed with Du for space computation calculation, 2024-11-17T15:29:52,500 WARN [Thread-878 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-17T15:29:52,503 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe18d8fcdc7f399ba with lease ID 0x6bf99bb86d5c7789: Processing first storage report for DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d from datanode DatanodeRegistration(127.0.0.1:41901, datanodeUuid=33316117-b2f4-4744-8272-772d237d836c, infoPort=37657, infoSecurePort=0, ipcPort=34197, storageInfo=lv=-57;cid=testClusterID;nsid=378286705;c=1731857379530) 2024-11-17T15:29:52,503 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe18d8fcdc7f399ba with lease ID 0x6bf99bb86d5c7789: from storage DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d node DatanodeRegistration(127.0.0.1:41901, datanodeUuid=33316117-b2f4-4744-8272-772d237d836c, infoPort=37657, infoSecurePort=0, ipcPort=34197, storageInfo=lv=-57;cid=testClusterID;nsid=378286705;c=1731857379530), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:29:52,503 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe18d8fcdc7f399ba with lease ID 0x6bf99bb86d5c7789: Processing first storage report for DS-0033af4e-847a-452d-b54b-1ae769f07800 from datanode DatanodeRegistration(127.0.0.1:41901, datanodeUuid=33316117-b2f4-4744-8272-772d237d836c, infoPort=37657, infoSecurePort=0, ipcPort=34197, storageInfo=lv=-57;cid=testClusterID;nsid=378286705;c=1731857379530) 2024-11-17T15:29:52,503 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe18d8fcdc7f399ba with lease ID 0x6bf99bb86d5c7789: from storage DS-0033af4e-847a-452d-b54b-1ae769f07800 node DatanodeRegistration(127.0.0.1:41901, datanodeUuid=33316117-b2f4-4744-8272-772d237d836c, infoPort=37657, infoSecurePort=0, ipcPort=34197, storageInfo=lv=-57;cid=testClusterID;nsid=378286705;c=1731857379530), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:29:52,602 WARN [ResponseProcessor for block BP-398811610-172.17.0.2-1731857379530:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-398811610-172.17.0.2-1731857379530:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:29:52,602 WARN [ResponseProcessor for block BP-398811610-172.17.0.2-1731857379530:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-398811610-172.17.0.2-1731857379530:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:29:52,602 WARN [ResponseProcessor for block BP-398811610-172.17.0.2-1731857379530:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-398811610-172.17.0.2-1731857379530:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-398811610-172.17.0.2-1731857379530:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:29:52,602 WARN [ResponseProcessor for block BP-398811610-172.17.0.2-1731857379530:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-398811610-172.17.0.2-1731857379530:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:29:52,603 WARN [DataStreamer for file /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta block BP-398811610-172.17.0.2-1731857379530:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK], DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK]) is bad. 2024-11-17T15:29:52,603 WARN [DataStreamer for file /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/WALs/7a780d55532c,33923,1731857380422/7a780d55532c%2C33923%2C1731857380422.1731857380683 block BP-398811610-172.17.0.2-1731857379530:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK], DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK]) is bad. 
2024-11-17T15:29:52,603 WARN [PacketResponder: BP-398811610-172.17.0.2-1731857379530:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:41873] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:29:52,603 WARN [DataStreamer for file /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857381092 block BP-398811610-172.17.0.2-1731857379530:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK], DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK]) is bad. 2024-11-17T15:29:52,603 WARN [DataStreamer for file /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 block BP-398811610-172.17.0.2-1731857379530:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK], DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK]) is bad. 
2024-11-17T15:29:52,604 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:57044 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34067:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57044 dst: /127.0.0.1:34067 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:29:52,604 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1823095610_22 at /127.0.0.1:57020 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34067:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57020 dst: /127.0.0.1:34067 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T15:29:52,604 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:57056 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34067:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57056 dst: /127.0.0.1:34067 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:29:52,604 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1501561265_22 at /127.0.0.1:57068 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:34067:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57068 dst: /127.0.0.1:34067 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T15:29:52,604 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1823095610_22 at /127.0.0.1:49686 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:41873:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49686 dst: /127.0.0.1:41873 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:29:52,604 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1501561265_22 at /127.0.0.1:49764 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:41873:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49764 dst: /127.0.0.1:41873 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:29:52,604 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:49730 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:41873:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49730 dst: /127.0.0.1:41873 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:29:52,605 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@707b5b0d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:29:52,605 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:49722 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:41873:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49722 dst: /127.0.0.1:41873 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T15:29:52,606 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5ff23317{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T15:29:52,606 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T15:29:52,606 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@56aeb798{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T15:29:52,606 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5c5cbc59{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/hadoop.log.dir/,STOPPED} 2024-11-17T15:29:52,609 WARN [BP-398811610-172.17.0.2-1731857379530 heartbeating to localhost/127.0.0.1:42915 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T15:29:52,609 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-17T15:29:52,609 WARN [BP-398811610-172.17.0.2-1731857379530 heartbeating to localhost/127.0.0.1:42915 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-398811610-172.17.0.2-1731857379530 (Datanode Uuid cad15bd0-c8d1-4c46-ba45-a08acfe53d98) service to localhost/127.0.0.1:42915 2024-11-17T15:29:52,609 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T15:29:52,610 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data3/current/BP-398811610-172.17.0.2-1731857379530 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:29:52,610 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data4/current/BP-398811610-172.17.0.2-1731857379530 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:29:52,610 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T15:29:52,611 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@2515365d {}] datanode.DataXceiver(331): 127.0.0.1:34067:DataXceiver error processing unknown operation src: /127.0.0.1:39410 dst: /127.0.0.1:34067 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T15:29:52,611 WARN [DataStreamer for file /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/WALs/7a780d55532c,33923,1731857380422/7a780d55532c%2C33923%2C1731857380422.1731857380683 block BP-398811610-172.17.0.2-1731857379530:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:29:52,611 WARN [DataStreamer for file /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857381092 block BP-398811610-172.17.0.2-1731857379530:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:29:52,611 WARN [DataStreamer for file /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta block BP-398811610-172.17.0.2-1731857379530:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:29:52,611 WARN [DataStreamer for file /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 block BP-398811610-172.17.0.2-1731857379530:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] 
at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:29:52,613 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@295bc59e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:29:52,614 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@517c4920{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T15:29:52,614 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T15:29:52,614 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6b94495b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T15:29:52,614 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a95d0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/hadoop.log.dir/,STOPPED} 2024-11-17T15:29:52,615 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T15:29:52,616 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T15:29:52,616 WARN [BP-398811610-172.17.0.2-1731857379530 heartbeating to localhost/127.0.0.1:42915 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T15:29:52,616 WARN [BP-398811610-172.17.0.2-1731857379530 heartbeating to localhost/127.0.0.1:42915 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-398811610-172.17.0.2-1731857379530 (Datanode Uuid 59b0f98c-0267-4075-b69d-ab99564b63dc) service to localhost/127.0.0.1:42915 2024-11-17T15:29:52,616 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data1/current/BP-398811610-172.17.0.2-1731857379530 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:29:52,617 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data2/current/BP-398811610-172.17.0.2-1731857379530 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:29:52,617 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T15:29:52,621 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b., hostname=7a780d55532c,38057,1731857380513, seqNum=2] 2024-11-17T15:29:52,623 ERROR [FSHLog-0-hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac-prefix:7a780d55532c,38057,1731857380513 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:29:52,623 WARN [FSHLog-0-hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac-prefix:7a780d55532c,38057,1731857380513 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:29:52,623 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:29:52,623 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7a780d55532c%2C38057%2C1731857380513:(num 1731857381092) roll requested 2024-11-17T15:29:52,624 INFO [regionserver/7a780d55532c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C38057%2C1731857380513.1731857392624 2024-11-17T15:29:52,630 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:52,630 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:52,630 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:52,631 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:52,631 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:52,631 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857381092 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857392624 2024-11-17T15:29:52,631 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:29:52,631 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:29:52,633 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-17T15:29:52,633 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37657:37657),(127.0.0.1/127.0.0.1:45059:45059)] 2024-11-17T15:29:52,633 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-17T15:29:52,633 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857381092 is not closed yet, will try archiving it next time 2024-11-17T15:29:52,633 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857381092 2024-11-17T15:29:52,636 WARN [IPC Server handler 1 on default port 42915 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857381092 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-11-17T15:29:52,636 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:29:52,639 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857381092 after 5ms 2024-11-17T15:29:53,781 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:29:54,634 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:29:54,635 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857392624 2024-11-17T15:29:54,635 WARN [ResponseProcessor for block BP-398811610-172.17.0.2-1731857379530:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-398811610-172.17.0.2-1731857379530:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:29:54,636 WARN [DataStreamer for file /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857392624 block BP-398811610-172.17.0.2-1731857379530:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK], DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK]) is bad. 2024-11-17T15:29:54,636 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:47206 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:41901:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47206 dst: /127.0.0.1:41901 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:29:54,637 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:29:54,637 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:42736 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:36583:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42736 dst: /127.0.0.1:36583 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:29:54,638 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@479380cb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:29:54,639 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5e61007e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T15:29:54,639 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T15:29:54,639 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@fd9d9ba{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T15:29:54,639 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3916c602{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/hadoop.log.dir/,STOPPED} 2024-11-17T15:29:54,641 WARN [BP-398811610-172.17.0.2-1731857379530 heartbeating to localhost/127.0.0.1:42915 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T15:29:54,641 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T15:29:54,641 WARN [BP-398811610-172.17.0.2-1731857379530 heartbeating to localhost/127.0.0.1:42915 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-398811610-172.17.0.2-1731857379530 (Datanode Uuid 33316117-b2f4-4744-8272-772d237d836c) service to localhost/127.0.0.1:42915 2024-11-17T15:29:54,641 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T15:29:54,642 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data9/current/BP-398811610-172.17.0.2-1731857379530 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:29:54,642 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data10/current/BP-398811610-172.17.0.2-1731857379530 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:29:54,642 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T15:29:55,781 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:29:56,635 WARN [regionserver/7a780d55532c:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK]] 2024-11-17T15:29:56,635 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T15:29:56,635 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7a780d55532c%2C38057%2C1731857380513:(num 1731857392624) roll requested 2024-11-17T15:29:56,635 INFO [regionserver/7a780d55532c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C38057%2C1731857380513.1731857396635 2024-11-17T15:29:56,637 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:29:56,638 WARN [Thread-910 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:29:56,638 WARN [Thread-910 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK], DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK]) is bad. 2024-11-17T15:29:56,639 WARN [Thread-910 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741839_1021 2024-11-17T15:29:56,640 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857381092 after 4007ms 2024-11-17T15:29:56,641 WARN [Thread-910 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK] 2024-11-17T15:29:56,644 WARN [Thread-910 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:29:56,645 WARN [Thread-910 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK], DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK]) is bad. 2024-11-17T15:29:56,645 WARN [Thread-910 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741840_1022 2024-11-17T15:29:56,645 WARN [Thread-910 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK] 2024-11-17T15:29:56,649 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-17T15:29:56,650 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:56,650 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:56,650 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:56,650 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:56,650 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:29:56,651 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857392624 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857396635 2024-11-17T15:29:56,652 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34697:34697),(127.0.0.1/127.0.0.1:45059:45059)] 2024-11-17T15:29:56,652 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857381092 is not closed yet, will try archiving it next time 2024-11-17T15:29:56,652 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857392624 is not closed yet, will try archiving it next time 2024-11-17T15:29:56,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36583 is added to blk_1073741838_1020 (size=2431)
2024-11-17T15:29:57,054 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857381092 is not closed yet, will try archiving it next time 2024-11-17T15:29:57,781 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:29:58,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37341 is added to blk_1073741838_1020 (size=2431) 2024-11-17T15:29:58,637 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:29:58,652 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:29:58,654 WARN [ResponseProcessor for block BP-398811610-172.17.0.2-1731857379530:blk_1073741841_1023 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-398811610-172.17.0.2-1731857379530:blk_1073741841_1023 java.io.IOException: Bad response ERROR for BP-398811610-172.17.0.2-1731857379530:blk_1073741841_1023 from datanode DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-17T15:29:58,654 WARN [DataStreamer for file /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857396635 block BP-398811610-172.17.0.2-1731857379530:blk_1073741841_1023 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37341,DS-9aafc437-98aa-4702-8637-e862e802d273,DISK], DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK]) is bad. 2024-11-17T15:29:58,654 WARN [PacketResponder: BP-398811610-172.17.0.2-1731857379530:blk_1073741841_1023, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:36583] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:29:58,655 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:43660 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:37341:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43660 dst: /127.0.0.1:37341 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:29:58,655 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:42744 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:36583:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42744 dst: /127.0.0.1:36583 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:29:58,656 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@47e3d6b8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:29:58,656 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6825a050{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T15:29:58,656 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T15:29:58,657 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a5ed106{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T15:29:58,657 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@122548d1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/hadoop.log.dir/,STOPPED} 2024-11-17T15:29:58,658 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T15:29:58,658 WARN [BP-398811610-172.17.0.2-1731857379530 heartbeating to localhost/127.0.0.1:42915 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T15:29:58,658 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T15:29:58,658 WARN [BP-398811610-172.17.0.2-1731857379530 heartbeating to localhost/127.0.0.1:42915 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-398811610-172.17.0.2-1731857379530 (Datanode Uuid ee2b034e-b9dd-4629-b935-c813f78579f6) service to localhost/127.0.0.1:42915 2024-11-17T15:29:58,659 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data5/current/BP-398811610-172.17.0.2-1731857379530 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:29:58,659 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data6/current/BP-398811610-172.17.0.2-1731857379530 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:29:58,659 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T15:29:58,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38057 {}] regionserver.HRegion(8855): Flush requested on b38f27f49ebe8200b4255f09e391ea9b 2024-11-17T15:29:58,667 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b38f27f49ebe8200b4255f09e391ea9b 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-17T15:29:58,691 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/.tmp/info/9f1e36294e3444908ad6f5604cb935f9 is 1080, key is row0002/info:/1731857394644/Put/seqid=0 2024-11-17T15:29:58,693 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T15:29:58,693 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK], DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK]) is bad. 2024-11-17T15:29:58,693 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741842_1025 2024-11-17T15:29:58,694 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK] 2024-11-17T15:29:58,696 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:43688 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741843_1026] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data8]'}, localName='127.0.0.1:37341', datanodeUuid='42b6b50f-ae35-42e2-bf0f-012e79f43601', xmitsInProgress=0}:Exception transferring block BP-398811610-172.17.0.2-1731857379530:blk_1073741843_1026 to mirror 127.0.0.1:34067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:29:58,697 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34067 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T15:29:58,697 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:43688 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741843_1026] {}] datanode.BlockReceiver(316): Block 1073741843 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-17T15:29:58,697 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37341,DS-9aafc437-98aa-4702-8637-e862e802d273,DISK], DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]) is bad. 2024-11-17T15:29:58,697 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741843_1026 2024-11-17T15:29:58,697 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:43688 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741843_1026] {}] datanode.DataXceiver(331): 127.0.0.1:37341:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43688 dst: /127.0.0.1:37341 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:29:58,697 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK] 2024-11-17T15:29:58,699 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T15:29:58,699 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK], DatanodeInfoWithStorage[127.0.0.1:37341,DS-9aafc437-98aa-4702-8637-e862e802d273,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK]) is bad. 2024-11-17T15:29:58,699 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741844_1027 2024-11-17T15:29:58,699 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK] 2024-11-17T15:29:58,701 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41873 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:29:58,701 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:43704 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741845_1028] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data8]'}, localName='127.0.0.1:37341', datanodeUuid='42b6b50f-ae35-42e2-bf0f-012e79f43601', xmitsInProgress=0}:Exception transferring block BP-398811610-172.17.0.2-1731857379530:blk_1073741845_1028 to mirror 127.0.0.1:41873 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T15:29:58,701 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37341,DS-9aafc437-98aa-4702-8637-e862e802d273,DISK], DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK]) is bad. 2024-11-17T15:29:58,701 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741845_1028 2024-11-17T15:29:58,701 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:43704 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741845_1028] {}] datanode.BlockReceiver(316): Block 1073741845 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-17T15:29:58,702 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:43704 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741845_1028] {}] datanode.DataXceiver(331): 127.0.0.1:37341:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43704 dst: /127.0.0.1:37341 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T15:29:58,702 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK] 2024-11-17T15:29:58,703 WARN [IPC Server handler 4 on default port 42915 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-17T15:29:58,703 WARN [IPC Server handler 4 on default port 42915 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-17T15:29:58,703 WARN [IPC Server handler 4 on default port 42915 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-17T15:29:58,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37341 is added to blk_1073741846_1029 (size=10347) 2024-11-17T15:29:59,107 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/.tmp/info/9f1e36294e3444908ad6f5604cb935f9 2024-11-17T15:29:59,116 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/.tmp/info/9f1e36294e3444908ad6f5604cb935f9 as hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/9f1e36294e3444908ad6f5604cb935f9 2024-11-17T15:29:59,123 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/9f1e36294e3444908ad6f5604cb935f9, entries=5, sequenceid=11, filesize=10.1 K 2024-11-17T15:29:59,124 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for b38f27f49ebe8200b4255f09e391ea9b in 457ms, sequenceid=11, compaction requested=false 2024-11-17T15:29:59,124 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b38f27f49ebe8200b4255f09e391ea9b:
2024-11-17T15:29:59,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38057 {}] regionserver.HRegion(8855): Flush requested on b38f27f49ebe8200b4255f09e391ea9b 2024-11-17T15:29:59,288 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b38f27f49ebe8200b4255f09e391ea9b 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-17T15:29:59,293 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/.tmp/info/a700bcd1db67456dbf42ff162732f759 is 1080, key is row0007/info:/1731857398668/Put/seqid=0 2024-11-17T15:29:59,295 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41901 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:29:59,295 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:43720 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741847_1030] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data8]'}, localName='127.0.0.1:37341', datanodeUuid='42b6b50f-ae35-42e2-bf0f-012e79f43601', xmitsInProgress=0}:Exception transferring block BP-398811610-172.17.0.2-1731857379530:blk_1073741847_1030 to mirror 127.0.0.1:41901 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-17T15:29:59,295 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37341,DS-9aafc437-98aa-4702-8637-e862e802d273,DISK], DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK]) is bad. 2024-11-17T15:29:59,295 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741847_1030 2024-11-17T15:29:59,296 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:43720 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741847_1030] {}] datanode.BlockReceiver(316): Block 1073741847 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-17T15:29:59,296 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:43720 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741847_1030] {}] datanode.DataXceiver(331): 127.0.0.1:37341:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43720 dst: /127.0.0.1:37341 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:29:59,296 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK] 2024-11-17T15:29:59,298 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34067 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T15:29:59,298 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:43722 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741848_1031] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data8]'}, localName='127.0.0.1:37341', datanodeUuid='42b6b50f-ae35-42e2-bf0f-012e79f43601', xmitsInProgress=0}:Exception transferring block BP-398811610-172.17.0.2-1731857379530:blk_1073741848_1031 to mirror 127.0.0.1:34067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:29:59,298 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37341,DS-9aafc437-98aa-4702-8637-e862e802d273,DISK], DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]) is bad. 2024-11-17T15:29:59,298 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741848_1031 2024-11-17T15:29:59,298 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:43722 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741848_1031] {}] datanode.BlockReceiver(316): Block 1073741848 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-17T15:29:59,298 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:43722 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741848_1031] {}] datanode.DataXceiver(331): 127.0.0.1:37341:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43722 dst: /127.0.0.1:37341 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:29:59,299 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK] 2024-11-17T15:29:59,301 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41873 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:29:59,301 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:43734 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741849_1032] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data8]'}, localName='127.0.0.1:37341', datanodeUuid='42b6b50f-ae35-42e2-bf0f-012e79f43601', xmitsInProgress=0}:Exception transferring block BP-398811610-172.17.0.2-1731857379530:blk_1073741849_1032 to mirror 127.0.0.1:41873 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T15:29:59,301 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37341,DS-9aafc437-98aa-4702-8637-e862e802d273,DISK], DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK]) is bad. 2024-11-17T15:29:59,301 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741849_1032 2024-11-17T15:29:59,301 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:43734 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741849_1032] {}] datanode.BlockReceiver(316): Block 1073741849 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-17T15:29:59,301 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:43734 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741849_1032] {}] datanode.DataXceiver(331): 127.0.0.1:37341:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43734 dst: /127.0.0.1:37341 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:29:59,302 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK] 2024-11-17T15:29:59,304 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:43738 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741850_1033] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data8]'}, localName='127.0.0.1:37341', datanodeUuid='42b6b50f-ae35-42e2-bf0f-012e79f43601', xmitsInProgress=0}:Exception transferring block BP-398811610-172.17.0.2-1731857379530:blk_1073741850_1033 to mirror 127.0.0.1:36583 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:29:59,304 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36583 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:29:59,304 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:43738 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741850_1033] {}] datanode.BlockReceiver(316): Block 1073741850 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-17T15:29:59,304 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37341,DS-9aafc437-98aa-4702-8637-e862e802d273,DISK], DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK]) is bad. 2024-11-17T15:29:59,304 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741850_1033 2024-11-17T15:29:59,304 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:43738 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741850_1033] {}] datanode.DataXceiver(331): 127.0.0.1:37341:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43738 dst: /127.0.0.1:37341 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-17T15:29:59,305 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK]
2024-11-17T15:29:59,305 WARN [IPC Server handler 4 on default port 42915 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-17T15:29:59,305 WARN [IPC Server handler 4 on default port 42915 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-17T15:29:59,305 WARN [IPC Server handler 4 on default port 42915 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-17T15:29:59,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37341 is added to blk_1073741851_1034 (size=12506)
2024-11-17T15:29:59,709 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/.tmp/info/a700bcd1db67456dbf42ff162732f759
2024-11-17T15:29:59,718 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/.tmp/info/a700bcd1db67456dbf42ff162732f759 as hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/a700bcd1db67456dbf42ff162732f759
2024-11-17T15:29:59,725 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/a700bcd1db67456dbf42ff162732f759, entries=7, sequenceid=24, filesize=12.2 K
2024-11-17T15:29:59,727 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for b38f27f49ebe8200b4255f09e391ea9b in 438ms, sequenceid=24, compaction requested=false
2024-11-17T15:29:59,727 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b38f27f49ebe8200b4255f09e391ea9b:
2024-11-17T15:29:59,727 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K
2024-11-17T15:29:59,727 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-17T15:29:59,727 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/a700bcd1db67456dbf42ff162732f759 because midkey is the same as first or last row
2024-11-17T15:29:59,782 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-17T15:30:00,638 INFO [master:store-WAL-Roller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-17T15:30:00,652 WARN [regionserver/7a780d55532c:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37341,DS-9aafc437-98aa-4702-8637-e862e802d273,DISK]]
2024-11-17T15:30:00,652 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:00,653 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7a780d55532c%2C38057%2C1731857380513:(num 1731857396635) roll requested 2024-11-17T15:30:00,653 INFO [regionserver/7a780d55532c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C38057%2C1731857380513.1731857400653 2024-11-17T15:30:00,656 WARN [Thread-936 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:00,656 WARN [Thread-936 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK], DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK]) is bad. 2024-11-17T15:30:00,656 WARN [Thread-936 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741852_1035 2024-11-17T15:30:00,657 WARN [Thread-936 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK] 2024-11-17T15:30:00,658 WARN [Thread-936 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
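
The low-replication warning above ("Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL") is what turns these datanode failures into a WAL roll: once the current write pipeline holds fewer replicas than the configured minimum, the roller closes the old log and opens a new one on whatever datanodes are still reachable. A minimal sketch of that check is below, assuming an invented callback interface and method names; it is not the FSHLog implementation, only the shape of the decision.

// Illustrative only: the shape of the low-replication check implied by the
// "Found 1 replicas but expecting no less than 2" log line. The interface and
// names here are invented for this sketch.
public class LowReplicationCheck {

    interface RollRequester { void requestRoll(String reason); }

    static void checkPipeline(String[] pipeline, int minTolerableReplicas, RollRequester roller) {
        // If the live pipeline has shrunk below the tolerated minimum, ask for a roll.
        if (pipeline.length < minTolerableReplicas) {
            roller.requestRoll("Found " + pipeline.length + " replicas but expecting no less than "
                + minTolerableReplicas + " replicas. Requesting close of WAL.");
        }
    }

    public static void main(String[] args) {
        // Current pipeline after the datanode deaths seen in the log: one replica left.
        String[] pipeline = { "127.0.0.1:37341" };
        checkPipeline(pipeline, 2, reason -> System.out.println("WAL roll requested: " + reason));
    }
}

In the log the check fires with a one-node pipeline ([127.0.0.1:37341]) against a minimum of 2, so the roll request at 15:30:00,653 follows immediately.
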
2024-11-17T15:30:00,658 WARN [Thread-936 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK], DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]) is bad. 2024-11-17T15:30:00,658 WARN [Thread-936 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741853_1036 2024-11-17T15:30:00,659 WARN [Thread-936 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK] 2024-11-17T15:30:00,659 WARN [Thread-936 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:00,660 WARN [Thread-936 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK], DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK]) is bad. 2024-11-17T15:30:00,660 WARN [Thread-936 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741854_1037 2024-11-17T15:30:00,660 WARN [Thread-936 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK] 2024-11-17T15:30:00,661 WARN [Thread-936 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-17T15:30:00,662 WARN [Thread-936 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK], DatanodeInfoWithStorage[127.0.0.1:37341,DS-9aafc437-98aa-4702-8637-e862e802d273,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK]) is bad.
2024-11-17T15:30:00,662 WARN [Thread-936 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741855_1038
2024-11-17T15:30:00,662 WARN [Thread-936 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK]
2024-11-17T15:30:00,663 WARN [IPC Server handler 1 on default port 42915 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-17T15:30:00,663 WARN [IPC Server handler 1 on default port 42915 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-17T15:30:00,663 WARN [IPC Server handler 1 on default port 42915 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-17T15:30:00,666 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:30:00,666 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:30:00,666 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:30:00,666 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:30:00,666 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:30:00,666 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857396635 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857400653
2024-11-17T15:30:00,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37341 is added to blk_1073741841_1024 (size=25992)
2024-11-17T15:30:00,669 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34697:34697)]
2024-11-17T15:30:00,669 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857381092 is not closed yet, will try archiving it next time
2024-11-17T15:30:00,669 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857396635 is not closed yet, will try archiving it next time
2024-11-17T15:30:00,669 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857392624 to hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/oldWALs/7a780d55532c%2C38057%2C1731857380513.1731857392624
2024-11-17T15:30:00,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38057 {}] regionserver.HRegion(8855): Flush requested on b38f27f49ebe8200b4255f09e391ea9b
2024-11-17T15:30:00,707 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b38f27f49ebe8200b4255f09e391ea9b 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB
2024-11-17T15:30:00,711 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/.tmp/info/7dbcf5a633484131af731267fa8c584b is 1079, key is tmprow/info:/1731857400706/Put/seqid=0
2024-11-17T15:30:00,713 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-17T15:30:00,713 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK], DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK]) is bad.
2024-11-17T15:30:00,713 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741857_1040 2024-11-17T15:30:00,714 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK] 2024-11-17T15:30:00,715 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:00,715 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK], DatanodeInfoWithStorage[127.0.0.1:37341,DS-9aafc437-98aa-4702-8637-e862e802d273,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]) is bad. 2024-11-17T15:30:00,715 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741858_1041 2024-11-17T15:30:00,715 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK] 2024-11-17T15:30:00,716 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T15:30:00,716 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK], DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK]) is bad. 2024-11-17T15:30:00,716 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741859_1042 2024-11-17T15:30:00,717 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK] 2024-11-17T15:30:00,718 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:00,718 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK], DatanodeInfoWithStorage[127.0.0.1:37341,DS-9aafc437-98aa-4702-8637-e862e802d273,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK]) is bad. 
2024-11-17T15:30:00,718 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741860_1043
2024-11-17T15:30:00,718 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK]
2024-11-17T15:30:00,719 WARN [IPC Server handler 4 on default port 42915 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-17T15:30:00,719 WARN [IPC Server handler 4 on default port 42915 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-17T15:30:00,719 WARN [IPC Server handler 4 on default port 42915 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-17T15:30:00,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37341 is added to blk_1073741861_1044 (size=6027)
2024-11-17T15:30:01,069 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857381092 is not closed yet, will try archiving it next time
2024-11-17T15:30:01,123 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/.tmp/info/7dbcf5a633484131af731267fa8c584b
2024-11-17T15:30:01,130 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/.tmp/info/7dbcf5a633484131af731267fa8c584b as hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/7dbcf5a633484131af731267fa8c584b
2024-11-17T15:30:01,136 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/7dbcf5a633484131af731267fa8c584b, entries=1, sequenceid=34, filesize=5.9 K
2024-11-17T15:30:01,137 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for b38f27f49ebe8200b4255f09e391ea9b in 430ms, sequenceid=34, compaction requested=true
2024-11-17T15:30:01,138 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b38f27f49ebe8200b4255f09e391ea9b:
2024-11-17T15:30:01,138 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K
2024-11-17T15:30:01,138 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-17T15:30:01,138 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/a700bcd1db67456dbf42ff162732f759 because midkey is the same as first or last row
2024-11-17T15:30:01,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b38f27f49ebe8200b4255f09e391ea9b:info, priority=-2147483648, current under compaction store size is 1
2024-11-17T15:30:01,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-17T15:30:01,138 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-17T15:30:01,139 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-17T15:30:01,139 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.HStore(1541): b38f27f49ebe8200b4255f09e391ea9b/info is initiating minor compaction (all files)
2024-11-17T15:30:01,140 INFO [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b38f27f49ebe8200b4255f09e391ea9b/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b.
2024-11-17T15:30:01,140 INFO [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/9f1e36294e3444908ad6f5604cb935f9, hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/a700bcd1db67456dbf42ff162732f759, hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/7dbcf5a633484131af731267fa8c584b] into tmpdir=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/.tmp, totalSize=28.2 K 2024-11-17T15:30:01,140 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9f1e36294e3444908ad6f5604cb935f9, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731857394644 2024-11-17T15:30:01,141 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] compactions.Compactor(225): Compacting a700bcd1db67456dbf42ff162732f759, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1731857398668 2024-11-17T15:30:01,141 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7dbcf5a633484131af731267fa8c584b, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731857400706 2024-11-17T15:30:01,155 INFO [RS:0;7a780d55532c:38057-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b38f27f49ebe8200b4255f09e391ea9b#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T15:30:01,155 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/.tmp/info/a3ca162feec54db3bff4968ce22b01fe is 1080, key is row0002/info:/1731857394644/Put/seqid=0 2024-11-17T15:30:01,157 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
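
After this second flush the store holds three HFiles totalling about 28 K, so the flusher re-evaluates the split policy (sum of store file sizes against a 16 K test threshold) and, because the largest file's midkey equals its first row, declines to split and queues a minor compaction of all three files instead. The sketch below folds those two checks into one illustrative method; the StoreFile record, the key values and the method names are invented here and are not HBase internals.

import java.util.List;

// Illustrative only: a simplified size-based split check matching the log lines
// "Should split because region size is big enough" and "cannot split ...
// because midkey is the same as first or last row". Types are invented here.
public class SplitCheckSketch {

    record StoreFile(String name, long sizeBytes, String firstRow, String midRow, String lastRow) {}

    static boolean shouldSplit(List<StoreFile> files, long sizeToCheckBytes) {
        long sumSize = files.stream().mapToLong(StoreFile::sizeBytes).sum();
        if (sumSize < sizeToCheckBytes) {
            return false; // region is still small enough, nothing to do
        }
        // The largest file supplies the candidate split point (its midkey).
        StoreFile largest = files.stream()
            .max((a, b) -> Long.compare(a.sizeBytes(), b.sizeBytes()))
            .orElseThrow();
        if (largest.midRow().equals(largest.firstRow()) || largest.midRow().equals(largest.lastRow())) {
            System.out.println("cannot split " + largest.name() + " because midkey is the same as first or last row");
            return false;
        }
        return true;
    }

    public static void main(String[] args) {
        // Sizes roughly as in the log: 10.1 K + 12.2 K + 5.9 K ~= 28.2 K, threshold 16 K.
        // The row keys are placeholders chosen so the midkey guard fires.
        List<StoreFile> files = List.of(
            new StoreFile("9f1e36294e3444908ad6f5604cb935f9", 10_342, "row0002", "row0002", "row0006"),
            new StoreFile("a700bcd1db67456dbf42ff162732f759", 12_506, "row0007", "row0007", "row0013"),
            new StoreFile("7dbcf5a633484131af731267fa8c584b", 6_027, "tmprow", "tmprow", "tmprow"));
        System.out.println("shouldSplit = " + shouldSplit(files, 16 * 1024));
    }
}

With the sizes from the log (10.1 K, 12.2 K and 5.9 K against a 16 K threshold) the size test passes but the midkey guard rejects the split, matching the "cannot split ... because midkey is the same as first or last row" lines above, and the region goes to compaction instead.
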
2024-11-17T15:30:01,157 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK], DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK]) is bad. 2024-11-17T15:30:01,157 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741862_1045 2024-11-17T15:30:01,158 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK] 2024-11-17T15:30:01,159 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34067 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:01,159 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:41220 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741863_1046] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data8]'}, localName='127.0.0.1:37341', datanodeUuid='42b6b50f-ae35-42e2-bf0f-012e79f43601', xmitsInProgress=0}:Exception transferring block BP-398811610-172.17.0.2-1731857379530:blk_1073741863_1046 to mirror 127.0.0.1:34067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T15:30:01,160 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37341,DS-9aafc437-98aa-4702-8637-e862e802d273,DISK], DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]) is bad. 2024-11-17T15:30:01,160 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741863_1046 2024-11-17T15:30:01,160 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:41220 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741863_1046] {}] datanode.BlockReceiver(316): Block 1073741863 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-17T15:30:01,160 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:41220 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741863_1046] {}] datanode.DataXceiver(331): 127.0.0.1:37341:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41220 dst: /127.0.0.1:37341 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:01,160 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK] 2024-11-17T15:30:01,161 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T15:30:01,161 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK], DatanodeInfoWithStorage[127.0.0.1:37341,DS-9aafc437-98aa-4702-8637-e862e802d273,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK]) is bad. 2024-11-17T15:30:01,161 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741864_1047 2024-11-17T15:30:01,162 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK] 2024-11-17T15:30:01,163 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:01,163 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK], DatanodeInfoWithStorage[127.0.0.1:37341,DS-9aafc437-98aa-4702-8637-e862e802d273,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK]) is bad. 
2024-11-17T15:30:01,163 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741865_1048 2024-11-17T15:30:01,163 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK] 2024-11-17T15:30:01,164 WARN [IPC Server handler 2 on default port 42915 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-17T15:30:01,164 WARN [IPC Server handler 2 on default port 42915 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-17T15:30:01,164 WARN [IPC Server handler 2 on default port 42915 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-17T15:30:01,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37341 is added to blk_1073741866_1049 (size=17994) 2024-11-17T15:30:01,347 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@54fbe9d6[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37341, datanodeUuid=42b6b50f-ae35-42e2-bf0f-012e79f43601, infoPort=34697, infoSecurePort=0, ipcPort=35119, storageInfo=lv=-57;cid=testClusterID;nsid=378286705;c=1731857379530):Failed to transfer BP-398811610-172.17.0.2-1731857379530:blk_1073741846_1029 to 127.0.0.1:41873 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
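
The repeated createBlockOutputStream failures above are the HDFS client walking its pipeline-recovery loop: each datanode that refuses the connection is abandoned and excluded until the NameNode can no longer place the requested 2 replicas and falls back to a single-node pipeline. How aggressively the client tries to replace failed datanodes is controlled by the dfs.client.block.write.replace-datanode-on-failure.* settings; the values below are a minimal illustrative sketch, not taken from this run.

import org.apache.hadoop.conf.Configuration;

public class PipelineRecoveryTuning {
    public static Configuration relaxedPipelineRecovery() {
        Configuration conf = new Configuration();
        // Keep datanode replacement enabled, but fall back to best effort so a
        // write can continue on the surviving pipeline instead of aborting with
        // "All datanodes [...] are bad".
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        return conf;
    }
}
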
2024-11-17T15:30:01,347 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5a3c1d39[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37341, datanodeUuid=42b6b50f-ae35-42e2-bf0f-012e79f43601, infoPort=34697, infoSecurePort=0, ipcPort=35119, storageInfo=lv=-57;cid=testClusterID;nsid=378286705;c=1731857379530):Failed to transfer BP-398811610-172.17.0.2-1731857379530:blk_1073741851_1034 to 127.0.0.1:36583 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:01,575 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/.tmp/info/a3ca162feec54db3bff4968ce22b01fe as hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/a3ca162feec54db3bff4968ce22b01fe 2024-11-17T15:30:01,583 INFO [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b38f27f49ebe8200b4255f09e391ea9b/info of b38f27f49ebe8200b4255f09e391ea9b into a3ca162feec54db3bff4968ce22b01fe(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
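
The split-policy lines that follow come from IncreasingToUpperBoundRegionSplitPolicy: with regionsWithCommonTable=1 the size threshold collapses to roughly the initial region size (16.0 K here), so the 17.6 K store is nominally "big enough", but StoreUtils then refuses to split because the compacted file's midkey equals its first or last row. A rough sketch of the threshold rule, with the cubic-growth formula written from memory rather than copied from the HBase source:

public final class SplitThresholdSketch {
    // Sketch: threshold = min(maxFileSize, initialSize * regionCount^3),
    // where initialSize is typically twice the memstore flush size.
    static long sizeToCheck(long maxFileSize, long initialSize, int regionsWithCommonTable) {
        if (regionsWithCommonTable == 0 || regionsWithCommonTable > 100) {
            return maxFileSize;
        }
        long cubic = initialSize * regionsWithCommonTable * regionsWithCommonTable
            * regionsWithCommonTable;
        return Math.min(maxFileSize, cubic);
    }

    public static void main(String[] args) {
        // With one region and a 16 KB initial size, anything over 16 KB
        // (e.g. the 17.6 K store above) trips the "should split" check.
        System.out.println(sizeToCheck(10L * 1024 * 1024 * 1024, 16 * 1024, 1));
    }
}
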
2024-11-17T15:30:01,583 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b38f27f49ebe8200b4255f09e391ea9b: 2024-11-17T15:30:01,583 INFO [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b., storeName=b38f27f49ebe8200b4255f09e391ea9b/info, priority=13, startTime=1731857401138; duration=0sec 2024-11-17T15:30:01,583 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-17T15:30:01,583 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T15:30:01,583 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/a3ca162feec54db3bff4968ce22b01fe because midkey is the same as first or last row 2024-11-17T15:30:01,584 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-17T15:30:01,584 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T15:30:01,584 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/a3ca162feec54db3bff4968ce22b01fe because midkey is the same as first or last row 2024-11-17T15:30:01,584 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-17T15:30:01,584 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T15:30:01,584 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/a3ca162feec54db3bff4968ce22b01fe because midkey is the same as first or last row 2024-11-17T15:30:01,584 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T15:30:01,584 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b38f27f49ebe8200b4255f09e391ea9b:info 2024-11-17T15:30:01,782 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:02,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38057 {}] regionserver.HRegion(8855): Flush requested on b38f27f49ebe8200b4255f09e391ea9b 2024-11-17T15:30:02,127 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b38f27f49ebe8200b4255f09e391ea9b 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-17T15:30:02,133 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/.tmp/info/f17674a10d654131af619311cce9ed0f is 1079, key is tmprow/info:/1731857402125/Put/seqid=0 2024-11-17T15:30:02,134 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:02,134 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK], DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]) is bad. 2024-11-17T15:30:02,135 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741867_1050 2024-11-17T15:30:02,135 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK] 2024-11-17T15:30:02,136 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:02,137 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK], DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK]) is bad. 2024-11-17T15:30:02,137 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741868_1051 2024-11-17T15:30:02,137 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK] 2024-11-17T15:30:02,139 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41901 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:02,139 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:41238 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741869_1052] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data8]'}, localName='127.0.0.1:37341', datanodeUuid='42b6b50f-ae35-42e2-bf0f-012e79f43601', xmitsInProgress=0}:Exception transferring block BP-398811610-172.17.0.2-1731857379530:blk_1073741869_1052 to mirror 127.0.0.1:41901 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:02,140 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37341,DS-9aafc437-98aa-4702-8637-e862e802d273,DISK], DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK]) is bad. 2024-11-17T15:30:02,140 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741869_1052 2024-11-17T15:30:02,140 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:41238 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741869_1052] {}] datanode.BlockReceiver(316): Block 1073741869 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-17T15:30:02,140 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:41238 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741869_1052] {}] datanode.DataXceiver(331): 127.0.0.1:37341:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41238 dst: /127.0.0.1:37341 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:02,140 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK] 2024-11-17T15:30:02,143 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41873 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:02,143 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:41242 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741870_1053] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data8]'}, localName='127.0.0.1:37341', datanodeUuid='42b6b50f-ae35-42e2-bf0f-012e79f43601', xmitsInProgress=0}:Exception transferring block BP-398811610-172.17.0.2-1731857379530:blk_1073741870_1053 to mirror 127.0.0.1:41873 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:02,143 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37341,DS-9aafc437-98aa-4702-8637-e862e802d273,DISK], DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK]) is bad. 2024-11-17T15:30:02,143 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741870_1053 2024-11-17T15:30:02,143 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:41242 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741870_1053] {}] datanode.BlockReceiver(316): Block 1073741870 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-17T15:30:02,143 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:41242 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741870_1053] {}] datanode.DataXceiver(331): 127.0.0.1:37341:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41242 dst: /127.0.0.1:37341 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:02,144 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK] 2024-11-17T15:30:02,145 WARN [IPC Server handler 1 on default port 42915 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-17T15:30:02,145 WARN [IPC Server handler 1 on default port 42915 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-17T15:30:02,145 WARN [IPC Server handler 1 on default port 42915 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-17T15:30:02,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37341 is added to blk_1073741871_1054 (size=6027) 2024-11-17T15:30:02,347 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5a3c1d39[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37341, datanodeUuid=42b6b50f-ae35-42e2-bf0f-012e79f43601, infoPort=34697, infoSecurePort=0, ipcPort=35119, storageInfo=lv=-57;cid=testClusterID;nsid=378286705;c=1731857379530):Failed to transfer BP-398811610-172.17.0.2-1731857379530:blk_1073741841_1024 to 127.0.0.1:41901 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:02,347 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@54fbe9d6[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37341, datanodeUuid=42b6b50f-ae35-42e2-bf0f-012e79f43601, infoPort=34697, infoSecurePort=0, ipcPort=35119, storageInfo=lv=-57;cid=testClusterID;nsid=378286705;c=1731857379530):Failed to transfer BP-398811610-172.17.0.2-1731857379530:blk_1073741861_1044 to 127.0.0.1:36583 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
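
The flush of f17674a10d654131af619311cce9ed0f goes through the same degraded pipeline: every candidate except 127.0.0.1:37341 refuses connections, so blk_1073741871_1054 (size=6027) ends up stored on a single datanode even though replication=2 was requested. A quick, hypothetical way to inspect such a file from a test or shell session is sketched below; the path is a stand-in, not the real store-file path from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReplicationCheck {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical store-file location; substitute the real flushed HFile.
        Path hfile = new Path("hdfs://localhost:42915/path/to/region/info/f17674a10d654131af619311cce9ed0f");
        try (FileSystem fs = FileSystem.get(hfile.toUri(), conf)) {
            FileStatus status = fs.getFileStatus(hfile);
            // getReplication() reports the requested factor; the block report above
            // shows only one datanode actually holds the block.
            System.out.println("requested replication = " + status.getReplication());
            System.out.println("length = " + status.getLen());
        }
    }
}
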
2024-11-17T15:30:02,550 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/.tmp/info/f17674a10d654131af619311cce9ed0f 2024-11-17T15:30:02,557 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/.tmp/info/f17674a10d654131af619311cce9ed0f as hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/f17674a10d654131af619311cce9ed0f 2024-11-17T15:30:02,563 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/f17674a10d654131af619311cce9ed0f, entries=1, sequenceid=45, filesize=5.9 K 2024-11-17T15:30:02,564 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for b38f27f49ebe8200b4255f09e391ea9b in 437ms, sequenceid=45, compaction requested=false 2024-11-17T15:30:02,564 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b38f27f49ebe8200b4255f09e391ea9b: 2024-11-17T15:30:02,564 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-17T15:30:02,564 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T15:30:02,564 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/a3ca162feec54db3bff4968ce22b01fe because midkey is the same as first or last row 2024-11-17T15:30:02,638 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:02,669 WARN [regionserver/7a780d55532c:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37341,DS-9aafc437-98aa-4702-8637-e862e802d273,DISK]] 2024-11-17T15:30:02,669 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:02,670 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7a780d55532c%2C38057%2C1731857380513:(num 1731857400653) roll requested 2024-11-17T15:30:02,670 INFO [regionserver/7a780d55532c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C38057%2C1731857380513.1731857402670 2024-11-17T15:30:02,673 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:02,673 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK], DatanodeInfoWithStorage[127.0.0.1:37341,DS-9aafc437-98aa-4702-8637-e862e802d273,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK]) is bad. 2024-11-17T15:30:02,673 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741872_1055 2024-11-17T15:30:02,673 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK] 2024-11-17T15:30:02,674 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:02,674 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK], DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK]) is bad. 2024-11-17T15:30:02,674 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741873_1056 2024-11-17T15:30:02,675 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK] 2024-11-17T15:30:02,676 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:02,676 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK], DatanodeInfoWithStorage[127.0.0.1:37341,DS-9aafc437-98aa-4702-8637-e862e802d273,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK]) is bad. 2024-11-17T15:30:02,676 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741874_1057 2024-11-17T15:30:02,676 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK] 2024-11-17T15:30:02,678 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34067 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:02,678 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:41264 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741875_1058] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data8]'}, localName='127.0.0.1:37341', datanodeUuid='42b6b50f-ae35-42e2-bf0f-012e79f43601', xmitsInProgress=0}:Exception transferring block BP-398811610-172.17.0.2-1731857379530:blk_1073741875_1058 to mirror 127.0.0.1:34067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:02,678 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37341,DS-9aafc437-98aa-4702-8637-e862e802d273,DISK], DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]) is bad. 2024-11-17T15:30:02,678 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741875_1058 2024-11-17T15:30:02,679 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:41264 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741875_1058] {}] datanode.BlockReceiver(316): Block 1073741875 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
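
The earlier FSHLog(529) warning ("Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL") is the WAL's low-replication check: once the current pipeline drops below the tolerable replica count, the log roller asks for a roll onto a fresh writer, which is what produces the new 1731857402670 WAL below. The configuration keys in this sketch are recalled from FSHLog and should be treated as assumptions to verify against the HBase version in use; the values are illustrative.

import org.apache.hadoop.conf.Configuration;

public class WalLowReplicationTuning {
    public static Configuration tune() {
        Configuration conf = new Configuration();
        // Assumed FSHLog keys: minimum pipeline replication before a roll is
        // requested ...
        conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2);
        // ... and how many consecutive low-replication rolls are attempted
        // before the WAL stops asking.
        conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5);
        return conf;
    }
}
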
2024-11-17T15:30:02,679 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:41264 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741875_1058] {}] datanode.DataXceiver(331): 127.0.0.1:37341:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41264 dst: /127.0.0.1:37341 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:02,679 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK] 2024-11-17T15:30:02,679 WARN [IPC Server handler 1 on default port 42915 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-17T15:30:02,680 WARN [IPC Server handler 1 on default port 42915 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-17T15:30:02,680 WARN [IPC Server handler 1 on default port 42915 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-17T15:30:02,682 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:02,682 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:02,682 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:02,682 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:02,682 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:02,682 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL 
/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857400653 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857402670 2024-11-17T15:30:02,683 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34697:34697)] 2024-11-17T15:30:02,684 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857381092 is not closed yet, will try archiving it next time 2024-11-17T15:30:02,684 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857400653 is not closed yet, will try archiving it next time 2024-11-17T15:30:02,684 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857396635 to hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/oldWALs/7a780d55532c%2C38057%2C1731857380513.1731857396635 2024-11-17T15:30:02,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37341 is added to blk_1073741856_1039 (size=13591) 2024-11-17T15:30:02,684 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857381092 is not closed yet, will try archiving it next time 2024-11-17T15:30:03,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38057 {}] regionserver.HRegion(8855): Flush requested on b38f27f49ebe8200b4255f09e391ea9b 2024-11-17T15:30:03,546 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b38f27f49ebe8200b4255f09e391ea9b 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-17T15:30:03,551 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/.tmp/info/13da304fa83b4b9cad35badd4a18d350 is 1079, key is tmprow/info:/1731857403545/Put/seqid=0 2024-11-17T15:30:03,553 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:03,553 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK], DatanodeInfoWithStorage[127.0.0.1:37341,DS-9aafc437-98aa-4702-8637-e862e802d273,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK]) is bad. 2024-11-17T15:30:03,553 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741877_1060 2024-11-17T15:30:03,554 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK] 2024-11-17T15:30:03,555 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:03,555 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK], DatanodeInfoWithStorage[127.0.0.1:37341,DS-9aafc437-98aa-4702-8637-e862e802d273,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]) is bad. 2024-11-17T15:30:03,555 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741878_1061 2024-11-17T15:30:03,556 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK] 2024-11-17T15:30:03,557 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:03,557 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK], DatanodeInfoWithStorage[127.0.0.1:37341,DS-9aafc437-98aa-4702-8637-e862e802d273,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK]) is bad. 2024-11-17T15:30:03,557 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741879_1062 2024-11-17T15:30:03,557 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK] 2024-11-17T15:30:03,559 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:03,559 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK], DatanodeInfoWithStorage[127.0.0.1:37341,DS-9aafc437-98aa-4702-8637-e862e802d273,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK]) is bad. 
2024-11-17T15:30:03,559 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741880_1063 2024-11-17T15:30:03,559 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK] 2024-11-17T15:30:03,560 WARN [IPC Server handler 2 on default port 42915 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-17T15:30:03,560 WARN [IPC Server handler 2 on default port 42915 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-17T15:30:03,560 WARN [IPC Server handler 2 on default port 42915 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-17T15:30:03,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37341 is added to blk_1073741881_1064 (size=6027) 2024-11-17T15:30:03,782 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
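
Every "Connection refused" target in this stretch (127.0.0.1:34067, :41901, :41873, :36583) is a datanode the test has already taken down, which is exactly the scenario TestLogRolling-testLogRollOnDatanodeDeath exercises; only 127.0.0.1:37341 keeps answering. In a MiniDFSCluster-based test this is usually driven with something like the sketch below, using the public MiniDFSCluster API as an assumption about how this particular test is wired.

import org.apache.hadoop.hdfs.MiniDFSCluster;

public class DatanodeDeathSketch {
    // Stop one datanode in a running mini cluster and let client writes discover
    // the dead pipeline member; stopDataNode(int) returns the stopped node's
    // properties so it could be restarted later.
    static MiniDFSCluster.DataNodeProperties killOneDatanode(MiniDFSCluster cluster) {
        MiniDFSCluster.DataNodeProperties stopped = cluster.stopDataNode(0);
        // Writes that had this node in their pipeline now see "Connection refused"
        // and go through DataStreamer error recovery, as logged above.
        return stopped;
    }
}
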
2024-11-17T15:30:03,964 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/.tmp/info/13da304fa83b4b9cad35badd4a18d350 2024-11-17T15:30:03,972 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/.tmp/info/13da304fa83b4b9cad35badd4a18d350 as hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/13da304fa83b4b9cad35badd4a18d350 2024-11-17T15:30:03,978 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/13da304fa83b4b9cad35badd4a18d350, entries=1, sequenceid=55, filesize=5.9 K 2024-11-17T15:30:03,979 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for b38f27f49ebe8200b4255f09e391ea9b in 433ms, sequenceid=55, compaction requested=true 2024-11-17T15:30:03,979 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b38f27f49ebe8200b4255f09e391ea9b: 2024-11-17T15:30:03,979 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-17T15:30:03,979 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T15:30:03,979 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/a3ca162feec54db3bff4968ce22b01fe because midkey is the same as first or last row 2024-11-17T15:30:03,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b38f27f49ebe8200b4255f09e391ea9b:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T15:30:03,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T15:30:03,979 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T15:30:03,981 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T15:30:03,981 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.HStore(1541): b38f27f49ebe8200b4255f09e391ea9b/info is initiating minor compaction (all files) 2024-11-17T15:30:03,981 INFO [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
b38f27f49ebe8200b4255f09e391ea9b/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b. 2024-11-17T15:30:03,981 INFO [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/a3ca162feec54db3bff4968ce22b01fe, hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/f17674a10d654131af619311cce9ed0f, hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/13da304fa83b4b9cad35badd4a18d350] into tmpdir=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/.tmp, totalSize=29.3 K 2024-11-17T15:30:03,981 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] compactions.Compactor(225): Compacting a3ca162feec54db3bff4968ce22b01fe, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731857394644 2024-11-17T15:30:03,982 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] compactions.Compactor(225): Compacting f17674a10d654131af619311cce9ed0f, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731857402125 2024-11-17T15:30:03,982 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] compactions.Compactor(225): Compacting 13da304fa83b4b9cad35badd4a18d350, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731857403545 2024-11-17T15:30:03,997 INFO [RS:0;7a780d55532c:38057-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b38f27f49ebe8200b4255f09e391ea9b#info#compaction#24 average throughput is 4.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T15:30:03,997 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/.tmp/info/1769c91bc16647d4b3c1c79492c729e4 is 1080, key is row0002/info:/1731857394644/Put/seqid=0 2024-11-17T15:30:04,000 WARN [Thread-970 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36583 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T15:30:04,000 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:41292 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741882_1065] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data8]'}, localName='127.0.0.1:37341', datanodeUuid='42b6b50f-ae35-42e2-bf0f-012e79f43601', xmitsInProgress=0}:Exception transferring block BP-398811610-172.17.0.2-1731857379530:blk_1073741882_1065 to mirror 127.0.0.1:36583 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:04,000 WARN [Thread-970 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37341,DS-9aafc437-98aa-4702-8637-e862e802d273,DISK], DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK]) is bad. 2024-11-17T15:30:04,000 WARN [Thread-970 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741882_1065 2024-11-17T15:30:04,000 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:41292 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741882_1065] {}] datanode.BlockReceiver(316): Block 1073741882 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-17T15:30:04,000 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:41292 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741882_1065] {}] datanode.DataXceiver(331): 127.0.0.1:37341:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41292 dst: /127.0.0.1:37341 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:04,001 WARN [Thread-970 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK] 2024-11-17T15:30:04,003 WARN [Thread-970 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41901 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:04,003 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:41304 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741883_1066] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data8]'}, localName='127.0.0.1:37341', datanodeUuid='42b6b50f-ae35-42e2-bf0f-012e79f43601', xmitsInProgress=0}:Exception transferring block BP-398811610-172.17.0.2-1731857379530:blk_1073741883_1066 to mirror 127.0.0.1:41901 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T15:30:04,003 WARN [Thread-970 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37341,DS-9aafc437-98aa-4702-8637-e862e802d273,DISK], DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK]) is bad. 2024-11-17T15:30:04,003 WARN [Thread-970 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741883_1066 2024-11-17T15:30:04,003 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:41304 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741883_1066] {}] datanode.BlockReceiver(316): Block 1073741883 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-17T15:30:04,003 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:41304 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741883_1066] {}] datanode.DataXceiver(331): 127.0.0.1:37341:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41304 dst: /127.0.0.1:37341 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:04,003 WARN [Thread-970 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK] 2024-11-17T15:30:04,004 WARN [Thread-970 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T15:30:04,005 WARN [Thread-970 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK], DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK]) is bad. 2024-11-17T15:30:04,005 WARN [Thread-970 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741884_1067 2024-11-17T15:30:04,005 WARN [Thread-970 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41873,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK] 2024-11-17T15:30:04,006 WARN [Thread-970 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:04,006 WARN [Thread-970 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK], DatanodeInfoWithStorage[127.0.0.1:37341,DS-9aafc437-98aa-4702-8637-e862e802d273,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]) is bad. 
2024-11-17T15:30:04,006 WARN [Thread-970 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741885_1068 2024-11-17T15:30:04,007 WARN [Thread-970 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK] 2024-11-17T15:30:04,007 WARN [IPC Server handler 0 on default port 42915 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-17T15:30:04,008 WARN [IPC Server handler 0 on default port 42915 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-17T15:30:04,008 WARN [IPC Server handler 0 on default port 42915 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-17T15:30:04,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37341 is added to blk_1073741886_1069 (size=18097) 2024-11-17T15:30:04,347 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5a3c1d39[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37341, datanodeUuid=42b6b50f-ae35-42e2-bf0f-012e79f43601, infoPort=34697, infoSecurePort=0, ipcPort=35119, storageInfo=lv=-57;cid=testClusterID;nsid=378286705;c=1731857379530):Failed to transfer BP-398811610-172.17.0.2-1731857379530:blk_1073741871_1054 to 127.0.0.1:41873 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T15:30:04,347 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@54fbe9d6[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37341, datanodeUuid=42b6b50f-ae35-42e2-bf0f-012e79f43601, infoPort=34697, infoSecurePort=0, ipcPort=35119, storageInfo=lv=-57;cid=testClusterID;nsid=378286705;c=1731857379530):Failed to transfer BP-398811610-172.17.0.2-1731857379530:blk_1073741866_1049 to 127.0.0.1:34067 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:04,421 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/.tmp/info/1769c91bc16647d4b3c1c79492c729e4 as hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/1769c91bc16647d4b3c1c79492c729e4 2024-11-17T15:30:04,434 INFO [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b38f27f49ebe8200b4255f09e391ea9b/info of b38f27f49ebe8200b4255f09e391ea9b into 1769c91bc16647d4b3c1c79492c729e4(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T15:30:04,434 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b38f27f49ebe8200b4255f09e391ea9b: 2024-11-17T15:30:04,434 INFO [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b., storeName=b38f27f49ebe8200b4255f09e391ea9b/info, priority=13, startTime=1731857403979; duration=0sec 2024-11-17T15:30:04,434 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-17T15:30:04,434 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T15:30:04,434 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/1769c91bc16647d4b3c1c79492c729e4 because midkey is the same as first or last row 2024-11-17T15:30:04,434 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-17T15:30:04,434 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T15:30:04,434 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/1769c91bc16647d4b3c1c79492c729e4 because midkey is the same as first or last row 2024-11-17T15:30:04,434 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-17T15:30:04,434 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T15:30:04,435 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/1769c91bc16647d4b3c1c79492c729e4 because midkey is the same as first or last row 2024-11-17T15:30:04,435 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T15:30:04,435 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b38f27f49ebe8200b4255f09e391ea9b:info 2024-11-17T15:30:04,639 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:04,684 WARN [regionserver/7a780d55532c:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-17T15:30:04,684 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:04,774 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:30:04,778 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T15:30:04,786 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T15:30:04,786 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T15:30:04,786 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T15:30:04,788 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@43ad4e97{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/hadoop.log.dir/,AVAILABLE} 2024-11-17T15:30:04,789 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7eb80544{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T15:30:04,929 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@74f37d73{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/java.io.tmpdir/jetty-localhost-36269-hadoop-hdfs-3_4_1-tests_jar-_-any-6423249875643634023/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:30:04,930 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2e73b100{HTTP/1.1, (http/1.1)}{localhost:36269} 2024-11-17T15:30:04,930 INFO [Time-limited test {}] server.Server(415): Started @128852ms 2024-11-17T15:30:04,931 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T15:30:05,037 WARN [Thread-990 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-17T15:30:05,046 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5b354154623e8705 with lease ID 0x6bf99bb86d5c778a: from storage DS-e57b16a9-d350-4424-9cda-497c30bb93e9 node DatanodeRegistration(127.0.0.1:35847, datanodeUuid=cad15bd0-c8d1-4c46-ba45-a08acfe53d98, infoPort=35731, infoSecurePort=0, ipcPort=33589, storageInfo=lv=-57;cid=testClusterID;nsid=378286705;c=1731857379530), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-17T15:30:05,046 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5b354154623e8705 with lease ID 0x6bf99bb86d5c778a: from storage DS-c5886253-11c7-46b4-880b-716675bbbc50 node DatanodeRegistration(127.0.0.1:35847, datanodeUuid=cad15bd0-c8d1-4c46-ba45-a08acfe53d98, infoPort=35731, infoSecurePort=0, ipcPort=33589, storageInfo=lv=-57;cid=testClusterID;nsid=378286705;c=1731857379530), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:30:05,347 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5a3c1d39[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37341, datanodeUuid=42b6b50f-ae35-42e2-bf0f-012e79f43601, infoPort=34697, infoSecurePort=0, ipcPort=35119, storageInfo=lv=-57;cid=testClusterID;nsid=378286705;c=1731857379530):Failed to transfer BP-398811610-172.17.0.2-1731857379530:blk_1073741881_1064 to 127.0.0.1:41901 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:05,347 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@54fbe9d6[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37341, datanodeUuid=42b6b50f-ae35-42e2-bf0f-012e79f43601, infoPort=34697, infoSecurePort=0, ipcPort=35119, storageInfo=lv=-57;cid=testClusterID;nsid=378286705;c=1731857379530):Failed to transfer BP-398811610-172.17.0.2-1731857379530:blk_1073741856_1039 to 127.0.0.1:34067 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:05,783 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:06,639 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:06,684 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T15:30:07,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35847 is added to blk_1073741886_1069 (size=18097) 2024-11-17T15:30:07,783 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:08,639 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:08,685 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:09,784 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T15:30:10,401 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-17T15:30:10,640 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:10,685 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:10,822 ERROR [FSHLog-0-hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData-prefix:7a780d55532c,33923,1731857380422 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:10,822 WARN [FSHLog-0-hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData-prefix:7a780d55532c,33923,1731857380422 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:10,823 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 7a780d55532c%2C33923%2C1731857380422:(num 1731857380683) roll requested 2024-11-17T15:30:10,823 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C33923%2C1731857380422.1731857410823 2024-11-17T15:30:10,829 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:10,829 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:10,829 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:10,829 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:10,829 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:10,830 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/WALs/7a780d55532c,33923,1731857380422/7a780d55532c%2C33923%2C1731857380422.1731857380683 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/WALs/7a780d55532c,33923,1731857380422/7a780d55532c%2C33923%2C1731857380422.1731857410823 2024-11-17T15:30:10,830 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:10,830 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T15:30:10,830 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/WALs/7a780d55532c,33923,1731857380422/7a780d55532c%2C33923%2C1731857380422.1731857380683 2024-11-17T15:30:10,830 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34697:34697),(127.0.0.1/127.0.0.1:35731:35731)] 2024-11-17T15:30:10,831 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/WALs/7a780d55532c,33923,1731857380422/7a780d55532c%2C33923%2C1731857380422.1731857380683 is not closed yet, will try archiving it next time 2024-11-17T15:30:10,831 WARN [IPC Server handler 1 on default port 42915 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/WALs/7a780d55532c,33923,1731857380422/7a780d55532c%2C33923%2C1731857380422.1731857380683 has not been closed. Lease recovery is in progress. RecoveryId = 1071 for block blk_1073741830_1006 2024-11-17T15:30:10,831 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/WALs/7a780d55532c,33923,1731857380422/7a780d55532c%2C33923%2C1731857380422.1731857380683 after 1ms 2024-11-17T15:30:11,784 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:12,686 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:13,784 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:14,686 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:14,832 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/WALs/7a780d55532c,33923,1731857380422/7a780d55532c%2C33923%2C1731857380422.1731857380683 after 4002ms 2024-11-17T15:30:15,060 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@21c21383 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-398811610-172.17.0.2-1731857379530:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:34067,null,null]) java.net.ConnectException: Call From 7a780d55532c/172.17.0.2 to localhost:38563 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-17T15:30:15,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35847 is added to blk_1073741833_1019 (size=455) 2024-11-17T15:30:15,661 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857381092 to hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/oldWALs/7a780d55532c%2C38057%2C1731857380513.1731857381092 2024-11-17T15:30:15,662 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857400653 to hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/oldWALs/7a780d55532c%2C38057%2C1731857380513.1731857400653 2024-11-17T15:30:15,785 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:16,041 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@10291747[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35847, datanodeUuid=cad15bd0-c8d1-4c46-ba45-a08acfe53d98, infoPort=35731, infoSecurePort=0, ipcPort=33589, storageInfo=lv=-57;cid=testClusterID;nsid=378286705;c=1731857379530):Failed to transfer BP-398811610-172.17.0.2-1731857379530:blk_1073741833_1019 to 127.0.0.1:41901 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:16,687 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:17,785 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T15:30:18,269 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C38057%2C1731857380513.1731857418269 2024-11-17T15:30:18,272 WARN [Thread-1021 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1072 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:18,272 WARN [Thread-1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741888_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK], DatanodeInfoWithStorage[127.0.0.1:37341,DS-9aafc437-98aa-4702-8637-e862e802d273,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK]) is bad. 2024-11-17T15:30:18,272 WARN [Thread-1021 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741888_1072 2024-11-17T15:30:18,272 WARN [Thread-1021 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK] 2024-11-17T15:30:18,274 WARN [Thread-1021 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1073 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:18,274 WARN [Thread-1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741889_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK], DatanodeInfoWithStorage[127.0.0.1:37341,DS-9aafc437-98aa-4702-8637-e862e802d273,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK]) is bad. 
2024-11-17T15:30:18,274 WARN [Thread-1021 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741889_1073 2024-11-17T15:30:18,274 WARN [Thread-1021 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK] 2024-11-17T15:30:18,278 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:18,279 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:18,279 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:18,279 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:18,279 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:18,279 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857402670 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857418269 2024-11-17T15:30:18,280 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35731:35731),(127.0.0.1/127.0.0.1:34697:34697)] 2024-11-17T15:30:18,280 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857402670 is not closed yet, will try archiving it next time 2024-11-17T15:30:18,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37341 is added to blk_1073741876_1059 (size=12911) 2024-11-17T15:30:18,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38057 {}] regionserver.HRegion(8855): Flush requested on b38f27f49ebe8200b4255f09e391ea9b 2024-11-17T15:30:18,284 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b38f27f49ebe8200b4255f09e391ea9b 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-17T15:30:18,289 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/.tmp/info/d171ff1572574006bb1a346b4200d351 is 1080, key is row0013/info:/1731857418281/Put/seqid=0 2024-11-17T15:30:18,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37341 is added to blk_1073741891_1075 (size=8190) 2024-11-17T15:30:18,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35847 is added to blk_1073741891_1075 (size=8190) 2024-11-17T15:30:18,294 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/.tmp/info/d171ff1572574006bb1a346b4200d351 2024-11-17T15:30:18,301 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/.tmp/info/d171ff1572574006bb1a346b4200d351 as hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/d171ff1572574006bb1a346b4200d351 2024-11-17T15:30:18,307 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/d171ff1572574006bb1a346b4200d351, entries=3, sequenceid=66, filesize=8.0 K 2024-11-17T15:30:18,308 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=8.41 KB/8608 for b38f27f49ebe8200b4255f09e391ea9b in 23ms, sequenceid=66, compaction requested=false 2024-11-17T15:30:18,308 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b38f27f49ebe8200b4255f09e391ea9b: 2024-11-17T15:30:18,308 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-11-17T15:30:18,308 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T15:30:18,308 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/1769c91bc16647d4b3c1c79492c729e4 because midkey is the same as first or last row 2024-11-17T15:30:18,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38057 {}] regionserver.HRegion(8855): Flush requested on b38f27f49ebe8200b4255f09e391ea9b 2024-11-17T15:30:18,308 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b38f27f49ebe8200b4255f09e391ea9b 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-17T15:30:18,312 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/.tmp/info/a0341f3dd8c64849b1817aea8e8e7f4b is 1080, key is row0015/info:/1731857418285/Put/seqid=0 2024-11-17T15:30:18,314 WARN [Thread-1035 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1076 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:18,314 WARN [Thread-1035 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741892_1076 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK], DatanodeInfoWithStorage[127.0.0.1:37341,DS-9aafc437-98aa-4702-8637-e862e802d273,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK]) is bad. 2024-11-17T15:30:18,314 WARN [Thread-1035 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741892_1076 2024-11-17T15:30:18,314 WARN [Thread-1035 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK] 2024-11-17T15:30:18,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37341 is added to blk_1073741893_1077 (size=14660) 2024-11-17T15:30:18,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35847 is added to blk_1073741893_1077 (size=14660) 2024-11-17T15:30:18,319 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/.tmp/info/a0341f3dd8c64849b1817aea8e8e7f4b 2024-11-17T15:30:18,325 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/.tmp/info/a0341f3dd8c64849b1817aea8e8e7f4b as hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/a0341f3dd8c64849b1817aea8e8e7f4b 2024-11-17T15:30:18,329 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/a0341f3dd8c64849b1817aea8e8e7f4b, entries=9, sequenceid=78, filesize=14.3 K 2024-11-17T15:30:18,331 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for b38f27f49ebe8200b4255f09e391ea9b in 22ms, sequenceid=78, compaction requested=true 2024-11-17T15:30:18,331 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b38f27f49ebe8200b4255f09e391ea9b: 2024-11-17T15:30:18,331 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=40.0 K, sizeToCheck=16.0 K 2024-11-17T15:30:18,331 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T15:30:18,331 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/1769c91bc16647d4b3c1c79492c729e4 because midkey is the same as first or last row 
2024-11-17T15:30:18,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b38f27f49ebe8200b4255f09e391ea9b:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T15:30:18,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T15:30:18,331 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T15:30:18,332 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40947 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T15:30:18,332 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.HStore(1541): b38f27f49ebe8200b4255f09e391ea9b/info is initiating minor compaction (all files) 2024-11-17T15:30:18,332 INFO [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b38f27f49ebe8200b4255f09e391ea9b/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b. 2024-11-17T15:30:18,332 INFO [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/1769c91bc16647d4b3c1c79492c729e4, hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/d171ff1572574006bb1a346b4200d351, hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/a0341f3dd8c64849b1817aea8e8e7f4b] into tmpdir=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/.tmp, totalSize=40.0 K 2024-11-17T15:30:18,333 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1769c91bc16647d4b3c1c79492c729e4, keycount=12, bloomtype=ROW, size=17.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731857394644 2024-11-17T15:30:18,333 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] compactions.Compactor(225): Compacting d171ff1572574006bb1a346b4200d351, keycount=3, bloomtype=ROW, size=8.0 K, encoding=NONE, compression=NONE, seqNum=66, earliestPutTs=1731857404562 2024-11-17T15:30:18,334 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] compactions.Compactor(225): Compacting a0341f3dd8c64849b1817aea8e8e7f4b, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1731857418285 2024-11-17T15:30:18,348 INFO [RS:0;7a780d55532c:38057-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b38f27f49ebe8200b4255f09e391ea9b#info#compaction#27 average throughput is 7.52 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T15:30:18,349 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/.tmp/info/7f7ce43573bf479e8aba188992a85b7b is 1080, key is row0002/info:/1731857394644/Put/seqid=0 2024-11-17T15:30:18,350 WARN [Thread-1042 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1078 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:18,351 WARN [Thread-1042 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741894_1078 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK], DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK]) is bad. 2024-11-17T15:30:18,351 WARN [Thread-1042 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741894_1078 2024-11-17T15:30:18,351 WARN [Thread-1042 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41901,DS-1ad1e33f-2cbd-4bbd-9c08-590e72bb767d,DISK] 2024-11-17T15:30:18,352 WARN [Thread-1042 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741895_1079 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T15:30:18,353 WARN [Thread-1042 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741895_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK], DatanodeInfoWithStorage[127.0.0.1:35847,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK]) is bad. 2024-11-17T15:30:18,353 WARN [Thread-1042 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741895_1079 2024-11-17T15:30:18,353 WARN [Thread-1042 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK] 2024-11-17T15:30:18,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37341 is added to blk_1073741896_1080 (size=28989) 2024-11-17T15:30:18,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35847 is added to blk_1073741896_1080 (size=28989) 2024-11-17T15:30:18,365 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/.tmp/info/7f7ce43573bf479e8aba188992a85b7b as hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/7f7ce43573bf479e8aba188992a85b7b 2024-11-17T15:30:18,373 INFO [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b38f27f49ebe8200b4255f09e391ea9b/info of b38f27f49ebe8200b4255f09e391ea9b into 7f7ce43573bf479e8aba188992a85b7b(size=28.3 K), total size for store is 28.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T15:30:18,373 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b38f27f49ebe8200b4255f09e391ea9b: 2024-11-17T15:30:18,373 INFO [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b., storeName=b38f27f49ebe8200b4255f09e391ea9b/info, priority=13, startTime=1731857418331; duration=0sec 2024-11-17T15:30:18,373 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-17T15:30:18,373 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T15:30:18,373 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/7f7ce43573bf479e8aba188992a85b7b because midkey is the same as first or last row 2024-11-17T15:30:18,373 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-17T15:30:18,373 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T15:30:18,373 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/7f7ce43573bf479e8aba188992a85b7b because midkey is the same as first or last row 2024-11-17T15:30:18,373 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-17T15:30:18,373 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T15:30:18,374 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/7f7ce43573bf479e8aba188992a85b7b because midkey is the same as first or last row 2024-11-17T15:30:18,374 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T15:30:18,374 DEBUG [RS:0;7a780d55532c:38057-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b38f27f49ebe8200b4255f09e391ea9b:info 2024-11-17T15:30:18,681 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.1731857402670 to 
hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/oldWALs/7a780d55532c%2C38057%2C1731857380513.1731857402670 2024-11-17T15:30:18,687 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-17T15:30:18,687 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:18,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-17T15:30:18,712 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-17T15:30:18,712 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T15:30:18,712 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:30:18,712 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:30:18,712 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-17T15:30:18,712 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-17T15:30:18,712 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1750200013, stopped=false 2024-11-17T15:30:18,712 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7a780d55532c,33923,1731857380422 2024-11-17T15:30:18,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38057-0x101268c5ec30001, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T15:30:18,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33923-0x101268c5ec30000, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T15:30:18,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35049-0x101268c5ec30002, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T15:30:18,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38057-0x101268c5ec30001, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:30:18,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33923-0x101268c5ec30000, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:30:18,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35049-0x101268c5ec30002, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:30:18,715 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T15:30:18,715 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-17T15:30:18,715 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T15:30:18,715 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:30:18,715 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '7a780d55532c,38057,1731857380513' ***** 2024-11-17T15:30:18,716 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-17T15:30:18,716 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7a780d55532c,35049,1731857381690' ***** 2024-11-17T15:30:18,716 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-17T15:30:18,716 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33923-0x101268c5ec30000, quorum=127.0.0.1:53267, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T15:30:18,716 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38057-0x101268c5ec30001, quorum=127.0.0.1:53267, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T15:30:18,716 INFO [RS:1;7a780d55532c:35049 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-17T15:30:18,716 INFO [RS:0;7a780d55532c:38057 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-17T15:30:18,716 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-17T15:30:18,716 INFO [RS:1;7a780d55532c:35049 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-17T15:30:18,716 INFO [RS:0;7a780d55532c:38057 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-17T15:30:18,716 INFO [RS:1;7a780d55532c:35049 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-17T15:30:18,716 INFO [RS:1;7a780d55532c:35049 {}] regionserver.HRegionServer(959): stopping server 7a780d55532c,35049,1731857381690 2024-11-17T15:30:18,716 INFO [RS:1;7a780d55532c:35049 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T15:30:18,716 INFO [RS:1;7a780d55532c:35049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;7a780d55532c:35049. 2024-11-17T15:30:18,716 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35049-0x101268c5ec30002, quorum=127.0.0.1:53267, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T15:30:18,716 INFO [RS:0;7a780d55532c:38057 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-17T15:30:18,716 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-17T15:30:18,716 DEBUG [RS:1;7a780d55532c:35049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T15:30:18,717 INFO [RS:0;7a780d55532c:38057 {}] regionserver.HRegionServer(3091): Received CLOSE for b38f27f49ebe8200b4255f09e391ea9b 2024-11-17T15:30:18,717 DEBUG [RS:1;7a780d55532c:35049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:30:18,717 INFO [RS:1;7a780d55532c:35049 {}] regionserver.HRegionServer(976): stopping server 7a780d55532c,35049,1731857381690; all regions closed. 2024-11-17T15:30:18,717 INFO [RS:0;7a780d55532c:38057 {}] regionserver.HRegionServer(959): stopping server 7a780d55532c,38057,1731857380513 2024-11-17T15:30:18,717 INFO [RS:0;7a780d55532c:38057 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T15:30:18,717 INFO [RS:0;7a780d55532c:38057 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7a780d55532c:38057. 
2024-11-17T15:30:18,717 DEBUG [RS:0;7a780d55532c:38057 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T15:30:18,717 DEBUG [RS:0;7a780d55532c:38057 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:30:18,717 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing b38f27f49ebe8200b4255f09e391ea9b, disabling compactions & flushes 2024-11-17T15:30:18,717 INFO [RS:0;7a780d55532c:38057 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-17T15:30:18,717 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:18,717 INFO [RS:0;7a780d55532c:38057 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-17T15:30:18,718 INFO [RS:0;7a780d55532c:38057 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-17T15:30:18,717 INFO [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b. 2024-11-17T15:30:18,718 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b. 2024-11-17T15:30:18,718 INFO [RS:0;7a780d55532c:38057 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-17T15:30:18,718 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b. after waiting 0 ms 2024-11-17T15:30:18,718 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b. 
2024-11-17T15:30:18,718 INFO [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing b38f27f49ebe8200b4255f09e391ea9b 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-17T15:30:18,718 INFO [RS:0;7a780d55532c:38057 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-17T15:30:18,718 DEBUG [RS:0;7a780d55532c:38057 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, b38f27f49ebe8200b4255f09e391ea9b=TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b.} 2024-11-17T15:30:18,718 DEBUG [RS:0;7a780d55532c:38057 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, b38f27f49ebe8200b4255f09e391ea9b 2024-11-17T15:30:18,718 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T15:30:18,718 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T15:30:18,718 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T15:30:18,718 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T15:30:18,718 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T15:30:18,718 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-17T15:30:18,718 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:18,719 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:18,719 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:18,719 ERROR [FSHLog-0-hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac-prefix:7a780d55532c,38057,1731857380513.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T15:30:18,719 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:18,719 WARN [FSHLog-0-hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac-prefix:7a780d55532c,38057,1731857380513.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:18,719 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7a780d55532c%2C38057%2C1731857380513.meta:.meta(num 1731857381524) roll requested 2024-11-17T15:30:18,719 INFO [regionserver/7a780d55532c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C38057%2C1731857380513.meta.1731857418719.meta 2024-11-17T15:30:18,719 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:18,719 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T15:30:18,719 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 2024-11-17T15:30:18,720 WARN [IPC Server handler 4 on default port 42915 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 has not been closed. Lease recovery is in progress. RecoveryId = 1081 for block blk_1073741837_1013 2024-11-17T15:30:18,720 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 after 1ms 2024-11-17T15:30:18,722 WARN [Thread-1048 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1082 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:18,722 WARN [Thread-1048 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741897_1082 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK], DatanodeInfoWithStorage[127.0.0.1:35847,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]) is bad. 
2024-11-17T15:30:18,722 WARN [Thread-1048 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741897_1082 2024-11-17T15:30:18,723 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/.tmp/info/cfa211f57f5f465ab55bb2ccdf61ec77 is 1079, key is tmprow/info:/1731857418510/Put/seqid=0 2024-11-17T15:30:18,723 WARN [Thread-1048 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK] 2024-11-17T15:30:18,725 WARN [Thread-1049 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741898_1083 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36583 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:18,725 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:35036 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741898_1083] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data4]'}, localName='127.0.0.1:35847', datanodeUuid='cad15bd0-c8d1-4c46-ba45-a08acfe53d98', xmitsInProgress=0}:Exception transferring block BP-398811610-172.17.0.2-1731857379530:blk_1073741898_1083 to mirror 127.0.0.1:36583 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T15:30:18,725 WARN [Thread-1049 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741898_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35847,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK], DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK]) is bad. 2024-11-17T15:30:18,725 WARN [Thread-1049 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741898_1083 2024-11-17T15:30:18,725 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:35036 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741898_1083] {}] datanode.BlockReceiver(316): Block 1073741898 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-17T15:30:18,726 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:35036 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741898_1083] {}] datanode.DataXceiver(331): 127.0.0.1:35847:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35036 dst: /127.0.0.1:35847 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:18,726 WARN [Thread-1049 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK] 2024-11-17T15:30:18,729 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:18,729 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:18,729 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:18,729 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:18,729 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:18,730 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857418719.meta 2024-11-17T15:30:18,732 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:18,732 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:18,732 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta 2024-11-17T15:30:18,732 WARN [IPC Server handler 3 on default port 42915 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1086 for block blk_1073741834_1010 2024-11-17T15:30:18,733 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35731:35731),(127.0.0.1/127.0.0.1:34697:34697)] 2024-11-17T15:30:18,733 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta after 1ms 2024-11-17T15:30:18,733 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta is not closed yet, will try archiving it next time 2024-11-17T15:30:18,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37341 is added to blk_1073741900_1085 (size=6027) 2024-11-17T15:30:18,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35847 is added to blk_1073741900_1085 (size=6027) 2024-11-17T15:30:18,738 INFO [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=83 (bloomFilter=true), to=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/.tmp/info/cfa211f57f5f465ab55bb2ccdf61ec77 2024-11-17T15:30:18,744 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/.tmp/info/cfa211f57f5f465ab55bb2ccdf61ec77 as hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/cfa211f57f5f465ab55bb2ccdf61ec77 2024-11-17T15:30:18,750 INFO [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/cfa211f57f5f465ab55bb2ccdf61ec77, entries=1, sequenceid=83, filesize=5.9 K 2024-11-17T15:30:18,751 INFO [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1075, heapSize ~1.36 KB/1392, currentSize=0 B/0 for b38f27f49ebe8200b4255f09e391ea9b in 33ms, sequenceid=83, compaction requested=false 2024-11-17T15:30:18,751 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/hbase/meta/1588230740/.tmp/info/3184beb75d934abb847869bbef956fd8 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b./info:regioninfo/1731857382199/Put/seqid=0 2024-11-17T15:30:18,751 DEBUG 
[StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/9f1e36294e3444908ad6f5604cb935f9, hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/a700bcd1db67456dbf42ff162732f759, hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/a3ca162feec54db3bff4968ce22b01fe, hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/7dbcf5a633484131af731267fa8c584b, hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/f17674a10d654131af619311cce9ed0f, hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/1769c91bc16647d4b3c1c79492c729e4, hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/13da304fa83b4b9cad35badd4a18d350, hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/d171ff1572574006bb1a346b4200d351, hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/a0341f3dd8c64849b1817aea8e8e7f4b] to archive 2024-11-17T15:30:18,753 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-17T15:30:18,753 WARN [Thread-1061 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741901_1087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T15:30:18,753 WARN [Thread-1061 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741901_1087 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK], DatanodeInfoWithStorage[127.0.0.1:37341,DS-9aafc437-98aa-4702-8637-e862e802d273,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK]) is bad. 2024-11-17T15:30:18,753 WARN [Thread-1061 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741901_1087 2024-11-17T15:30:18,754 WARN [Thread-1061 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK] 2024-11-17T15:30:18,755 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/9f1e36294e3444908ad6f5604cb935f9 to hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/9f1e36294e3444908ad6f5604cb935f9 2024-11-17T15:30:18,756 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/a700bcd1db67456dbf42ff162732f759 to hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/a700bcd1db67456dbf42ff162732f759 2024-11-17T15:30:18,758 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/a3ca162feec54db3bff4968ce22b01fe to hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/a3ca162feec54db3bff4968ce22b01fe 2024-11-17T15:30:18,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35847 is added to blk_1073741902_1088 (size=7089) 2024-11-17T15:30:18,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37341 is added to blk_1073741902_1088 (size=7089) 2024-11-17T15:30:18,759 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/hbase/meta/1588230740/.tmp/info/3184beb75d934abb847869bbef956fd8 2024-11-17T15:30:18,760 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b.-1 {}] backup.HFileArchiver(596): Archived from 
FileableStoreFile, hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/7dbcf5a633484131af731267fa8c584b to hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/7dbcf5a633484131af731267fa8c584b 2024-11-17T15:30:18,762 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/f17674a10d654131af619311cce9ed0f to hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/f17674a10d654131af619311cce9ed0f 2024-11-17T15:30:18,763 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/1769c91bc16647d4b3c1c79492c729e4 to hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/1769c91bc16647d4b3c1c79492c729e4 2024-11-17T15:30:18,764 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/13da304fa83b4b9cad35badd4a18d350 to hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/13da304fa83b4b9cad35badd4a18d350 2024-11-17T15:30:18,766 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/d171ff1572574006bb1a346b4200d351 to hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/d171ff1572574006bb1a346b4200d351 2024-11-17T15:30:18,767 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/a0341f3dd8c64849b1817aea8e8e7f4b to 
hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/info/a0341f3dd8c64849b1817aea8e8e7f4b 2024-11-17T15:30:18,768 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=7a780d55532c:33923 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] 
at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-17T15:30:18,769 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [9f1e36294e3444908ad6f5604cb935f9=10347, a700bcd1db67456dbf42ff162732f759=12506, a3ca162feec54db3bff4968ce22b01fe=17994, 7dbcf5a633484131af731267fa8c584b=6027, f17674a10d654131af619311cce9ed0f=6027, 1769c91bc16647d4b3c1c79492c729e4=18097, 13da304fa83b4b9cad35badd4a18d350=6027, d171ff1572574006bb1a346b4200d351=8190, a0341f3dd8c64849b1817aea8e8e7f4b=14660] 2024-11-17T15:30:18,773 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b38f27f49ebe8200b4255f09e391ea9b/recovered.edits/86.seqid, newMaxSeqId=86, maxSeqId=1 2024-11-17T15:30:18,773 INFO [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b. 2024-11-17T15:30:18,773 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for b38f27f49ebe8200b4255f09e391ea9b: Waiting for close lock at 1731857418717Running coprocessor pre-close hooks at 1731857418717Disabling compacts and flushes for region at 1731857418717Disabling writes for close at 1731857418718 (+1 ms)Obtaining lock to block concurrent updates at 1731857418718Preparing flush snapshotting stores in b38f27f49ebe8200b4255f09e391ea9b at 1731857418718Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b., syncing WAL and waiting on mvcc, flushsize=dataSize=1075, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731857418718Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b. 
at 1731857418719 (+1 ms)Flushing b38f27f49ebe8200b4255f09e391ea9b/info: creating writer at 1731857418719Flushing b38f27f49ebe8200b4255f09e391ea9b/info: appending metadata at 1731857418722 (+3 ms)Flushing b38f27f49ebe8200b4255f09e391ea9b/info: closing flushed file at 1731857418722Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@26a2e727: reopening flushed file at 1731857418744 (+22 ms)Finished flush of dataSize ~1.05 KB/1075, heapSize ~1.36 KB/1392, currentSize=0 B/0 for b38f27f49ebe8200b4255f09e391ea9b in 33ms, sequenceid=83, compaction requested=false at 1731857418751 (+7 ms)Writing region close event to WAL at 1731857418769 (+18 ms)Running coprocessor post-close hooks at 1731857418773 (+4 ms)Closed at 1731857418773 2024-11-17T15:30:18,774 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731857381824.b38f27f49ebe8200b4255f09e391ea9b. 2024-11-17T15:30:18,782 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/hbase/meta/1588230740/.tmp/ns/10811f57f91a484191852df8d8e7eaad is 43, key is default/ns:d/1731857381617/Put/seqid=0 2024-11-17T15:30:18,783 WARN [Thread-1067 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741903_1089 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:18,783 WARN [Thread-1067 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741903_1089 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK], DatanodeInfoWithStorage[127.0.0.1:35847,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK]) is bad. 
2024-11-17T15:30:18,783 WARN [Thread-1067 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741903_1089 2024-11-17T15:30:18,784 WARN [Thread-1067 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK] 2024-11-17T15:30:18,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37341 is added to blk_1073741904_1090 (size=5153) 2024-11-17T15:30:18,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35847 is added to blk_1073741904_1090 (size=5153) 2024-11-17T15:30:18,789 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/hbase/meta/1588230740/.tmp/ns/10811f57f91a484191852df8d8e7eaad 2024-11-17T15:30:18,811 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/hbase/meta/1588230740/.tmp/table/aaa386912a7f476fbd4b56f5e3cdfdad is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731857382210/Put/seqid=0 2024-11-17T15:30:18,814 WARN [Thread-1074 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741905_1091 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34067 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:18,814 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:35068 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741905_1091] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data4]'}, localName='127.0.0.1:35847', datanodeUuid='cad15bd0-c8d1-4c46-ba45-a08acfe53d98', xmitsInProgress=0}:Exception transferring block BP-398811610-172.17.0.2-1731857379530:blk_1073741905_1091 to mirror 127.0.0.1:34067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:18,814 WARN [Thread-1074 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741905_1091 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35847,DS-e57b16a9-d350-4424-9cda-497c30bb93e9,DISK], DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK]) is bad. 2024-11-17T15:30:18,814 WARN [Thread-1074 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741905_1091 2024-11-17T15:30:18,814 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:35068 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741905_1091] {}] datanode.BlockReceiver(316): Block 1073741905 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-17T15:30:18,814 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:35068 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741905_1091] {}] datanode.DataXceiver(331): 127.0.0.1:35847:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35068 dst: /127.0.0.1:35847 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:18,815 WARN [Thread-1074 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34067,DS-afe86fda-d881-4740-8700-829d80269652,DISK] 2024-11-17T15:30:18,817 WARN [Thread-1074 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741906_1092 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36583 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:18,816 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:54520 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741906_1092] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data8]'}, localName='127.0.0.1:37341', datanodeUuid='42b6b50f-ae35-42e2-bf0f-012e79f43601', xmitsInProgress=0}:Exception transferring block BP-398811610-172.17.0.2-1731857379530:blk_1073741906_1092 to mirror 127.0.0.1:36583 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:18,817 WARN [Thread-1074 {}] hdfs.DataStreamer(1731): Error Recovery for BP-398811610-172.17.0.2-1731857379530:blk_1073741906_1092 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37341,DS-9aafc437-98aa-4702-8637-e862e802d273,DISK], DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK]) is bad. 2024-11-17T15:30:18,817 WARN [Thread-1074 {}] hdfs.DataStreamer(1850): Abandoning BP-398811610-172.17.0.2-1731857379530:blk_1073741906_1092 2024-11-17T15:30:18,817 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:54520 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741906_1092] {}] datanode.BlockReceiver(316): Block 1073741906 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
2024-11-17T15:30:18,817 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_677536209_22 at /127.0.0.1:54520 [Receiving block BP-398811610-172.17.0.2-1731857379530:blk_1073741906_1092] {}] datanode.DataXceiver(331): 127.0.0.1:37341:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54520 dst: /127.0.0.1:37341 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:18,817 WARN [Thread-1074 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36583,DS-0c1ffe6e-a543-4dd8-ab32-db61a645e95f,DISK] 2024-11-17T15:30:18,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35847 is added to blk_1073741907_1093 (size=5424) 2024-11-17T15:30:18,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37341 is added to blk_1073741907_1093 (size=5424) 2024-11-17T15:30:18,822 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/hbase/meta/1588230740/.tmp/table/aaa386912a7f476fbd4b56f5e3cdfdad 2024-11-17T15:30:18,828 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/hbase/meta/1588230740/.tmp/info/3184beb75d934abb847869bbef956fd8 as hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/hbase/meta/1588230740/info/3184beb75d934abb847869bbef956fd8 2024-11-17T15:30:18,834 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/hbase/meta/1588230740/info/3184beb75d934abb847869bbef956fd8, entries=10, sequenceid=11, filesize=6.9 K 2024-11-17T15:30:18,835 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/hbase/meta/1588230740/.tmp/ns/10811f57f91a484191852df8d8e7eaad as hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/hbase/meta/1588230740/ns/10811f57f91a484191852df8d8e7eaad 2024-11-17T15:30:18,840 INFO 
[RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/hbase/meta/1588230740/ns/10811f57f91a484191852df8d8e7eaad, entries=2, sequenceid=11, filesize=5.0 K 2024-11-17T15:30:18,841 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/hbase/meta/1588230740/.tmp/table/aaa386912a7f476fbd4b56f5e3cdfdad as hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/hbase/meta/1588230740/table/aaa386912a7f476fbd4b56f5e3cdfdad 2024-11-17T15:30:18,846 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/hbase/meta/1588230740/table/aaa386912a7f476fbd4b56f5e3cdfdad, entries=2, sequenceid=11, filesize=5.3 K 2024-11-17T15:30:18,848 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 129ms, sequenceid=11, compaction requested=false 2024-11-17T15:30:18,853 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-17T15:30:18,853 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T15:30:18,853 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T15:30:18,854 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731857418718Running coprocessor pre-close hooks at 1731857418718Disabling compacts and flushes for region at 1731857418718Disabling writes for close at 1731857418718Obtaining lock to block concurrent updates at 1731857418718Preparing flush snapshotting stores in 1588230740 at 1731857418718Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731857418719 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731857418733 (+14 ms)Flushing 1588230740/info: creating writer at 1731857418733Flushing 1588230740/info: appending metadata at 1731857418751 (+18 ms)Flushing 1588230740/info: closing flushed file at 1731857418751Flushing 1588230740/ns: creating writer at 1731857418766 (+15 ms)Flushing 1588230740/ns: appending metadata at 1731857418781 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731857418781Flushing 1588230740/table: creating writer at 1731857418795 (+14 ms)Flushing 1588230740/table: appending metadata at 1731857418811 (+16 ms)Flushing 1588230740/table: closing flushed file at 1731857418811Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@428aed15: reopening flushed file at 1731857418828 (+17 
ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@75600724: reopening flushed file at 1731857418834 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@462982a1: reopening flushed file at 1731857418841 (+7 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 129ms, sequenceid=11, compaction requested=false at 1731857418848 (+7 ms)Writing region close event to WAL at 1731857418849 (+1 ms)Running coprocessor post-close hooks at 1731857418853 (+4 ms)Closed at 1731857418853 2024-11-17T15:30:18,854 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-17T15:30:18,862 INFO [regionserver/7a780d55532c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-17T15:30:18,862 INFO [regionserver/7a780d55532c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-17T15:30:18,918 INFO [RS:0;7a780d55532c:38057 {}] regionserver.HRegionServer(976): stopping server 7a780d55532c,38057,1731857380513; all regions closed. 2024-11-17T15:30:18,919 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:18,919 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:18,919 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:18,919 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:18,919 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:18,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37341 is added to blk_1073741899_1084 (size=825) 2024-11-17T15:30:18,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35847 is added to blk_1073741899_1084 (size=825) 2024-11-17T15:30:18,956 INFO [regionserver/7a780d55532c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T15:30:19,028 INFO [regionserver/7a780d55532c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-17T15:30:19,028 INFO [regionserver/7a780d55532c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-17T15:30:19,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35847 is added to blk_1073741876_1059 (size=12911) 2024-11-17T15:30:19,782 INFO [regionserver/7a780d55532c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T15:30:21,379 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-17T15:30:21,380 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T15:30:21,380 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-17T15:30:22,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37341 is added to blk_1073741835_1011 (size=393) 2024-11-17T15:30:22,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:37341 is added to blk_1073741831_1007 (size=1321) 2024-11-17T15:30:22,221 INFO [master/7a780d55532c:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-17T15:30:22,221 INFO [master/7a780d55532c:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-17T15:30:22,721 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 after 4002ms 2024-11-17T15:30:22,733 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta after 4001ms 2024-11-17T15:30:23,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37341 is added to blk_1073741829_1005 (size=34) 2024-11-17T15:30:23,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37341 is added to blk_1073741827_1003 (size=196) 2024-11-17T15:30:23,719 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-17T15:30:23,722 DEBUG [RS:1;7a780d55532c:35049 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/oldWALs 2024-11-17T15:30:23,722 INFO [RS:1;7a780d55532c:35049 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7a780d55532c%2C35049%2C1731857381690:(num 1731857381925) 2024-11-17T15:30:23,722 DEBUG [RS:1;7a780d55532c:35049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:30:23,722 INFO [RS:1;7a780d55532c:35049 {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T15:30:23,722 INFO [RS:1;7a780d55532c:35049 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T15:30:23,722 INFO [RS:1;7a780d55532c:35049 {}] hbase.ChoreService(370): Chore service for: regionserver/7a780d55532c:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-17T15:30:23,722 INFO [RS:1;7a780d55532c:35049 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-17T15:30:23,722 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-17T15:30:23,722 INFO [RS:1;7a780d55532c:35049 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-17T15:30:23,722 INFO [RS:1;7a780d55532c:35049 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-17T15:30:23,722 INFO [RS:1;7a780d55532c:35049 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T15:30:23,722 INFO [RS:1;7a780d55532c:35049 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35049 2024-11-17T15:30:23,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35049-0x101268c5ec30002, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7a780d55532c,35049,1731857381690 2024-11-17T15:30:23,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33923-0x101268c5ec30000, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T15:30:23,726 INFO [RS:1;7a780d55532c:35049 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T15:30:23,728 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7a780d55532c,35049,1731857381690] 2024-11-17T15:30:23,728 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:23,729 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7a780d55532c,35049,1731857381690 already deleted, retry=false 2024-11-17T15:30:23,729 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7a780d55532c,35049,1731857381690 expired; onlineServers=1 2024-11-17T15:30:23,774 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:23,787 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:23,787 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:23,787 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:23,787 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:23,788 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:23,794 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:23,795 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:23,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35049-0x101268c5ec30002, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T15:30:23,828 INFO [RS:1;7a780d55532c:35049 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T15:30:23,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35049-0x101268c5ec30002, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T15:30:23,828 INFO [RS:1;7a780d55532c:35049 {}] regionserver.HRegionServer(1031): Exiting; stopping=7a780d55532c,35049,1731857381690; zookeeper connection closed. 2024-11-17T15:30:23,828 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7a01df63 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7a01df63 2024-11-17T15:30:23,920 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-17T15:30:23,923 DEBUG [RS:0;7a780d55532c:38057 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/oldWALs 2024-11-17T15:30:23,923 INFO [RS:0;7a780d55532c:38057 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7a780d55532c%2C38057%2C1731857380513.meta:.meta(num 1731857418719) 2024-11-17T15:30:23,924 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:23,924 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:23,924 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:23,924 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:23,924 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:23,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37341 is added to blk_1073741890_1074 (size=16988) 2024-11-17T15:30:23,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35847 is added to blk_1073741890_1074 (size=16988) 2024-11-17T15:30:23,928 DEBUG [RS:0;7a780d55532c:38057 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/oldWALs 2024-11-17T15:30:23,928 INFO [RS:0;7a780d55532c:38057 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7a780d55532c%2C38057%2C1731857380513:(num 1731857418269) 2024-11-17T15:30:23,928 DEBUG [RS:0;7a780d55532c:38057 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:30:23,928 INFO [RS:0;7a780d55532c:38057 {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T15:30:23,928 INFO [RS:0;7a780d55532c:38057 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T15:30:23,929 INFO [RS:0;7a780d55532c:38057 {}] hbase.ChoreService(370): Chore service for: regionserver/7a780d55532c:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore 
name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-17T15:30:23,929 INFO [RS:0;7a780d55532c:38057 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T15:30:23,929 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-17T15:30:23,929 INFO [RS:0;7a780d55532c:38057 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38057 2024-11-17T15:30:23,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38057-0x101268c5ec30001, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7a780d55532c,38057,1731857380513 2024-11-17T15:30:23,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33923-0x101268c5ec30000, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T15:30:23,931 INFO [RS:0;7a780d55532c:38057 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T15:30:23,933 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7a780d55532c,38057,1731857380513] 2024-11-17T15:30:23,935 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7a780d55532c,38057,1731857380513 already deleted, retry=false 2024-11-17T15:30:23,935 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7a780d55532c,38057,1731857380513 expired; onlineServers=0 2024-11-17T15:30:23,935 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7a780d55532c,33923,1731857380422' ***** 2024-11-17T15:30:23,935 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-17T15:30:23,935 INFO [M:0;7a780d55532c:33923 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T15:30:23,935 INFO [M:0;7a780d55532c:33923 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T15:30:23,935 DEBUG [M:0;7a780d55532c:33923 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-17T15:30:23,935 DEBUG [M:0;7a780d55532c:33923 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-17T15:30:23,935 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
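The WAL-Shutdown-0 ERROR a few entries above reports that the async writer did not close within 5 seconds and points at the setting "hbase.wal.fshlog.wait.on.shutdown.seconds". Below is a minimal sketch of how that value could be raised through the standard Hadoop/HBase Configuration API; the class name and the 30-second value are illustrative assumptions, not taken from this test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalShutdownWaitSketch {
  public static void main(String[] args) {
    // Start from the usual HBase configuration (hbase-site.xml on the classpath, if any).
    Configuration conf = HBaseConfiguration.create();
    // Property name taken verbatim from the ERROR message above; 30 is an arbitrary example.
    conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 30);
    // Print the effective value, falling back to the 5-second wait seen in the log.
    System.out.println(conf.getInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 5));
  }
}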
2024-11-17T15:30:23,935 DEBUG [master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.small.0-1731857380836 {}] cleaner.HFileCleaner(306): Exit Thread[master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.small.0-1731857380836,5,FailOnTimeoutGroup] 2024-11-17T15:30:23,935 DEBUG [master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.large.0-1731857380833 {}] cleaner.HFileCleaner(306): Exit Thread[master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.large.0-1731857380833,5,FailOnTimeoutGroup] 2024-11-17T15:30:23,936 INFO [M:0;7a780d55532c:33923 {}] hbase.ChoreService(370): Chore service for: master/7a780d55532c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-17T15:30:23,936 INFO [M:0;7a780d55532c:33923 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T15:30:23,936 DEBUG [M:0;7a780d55532c:33923 {}] master.HMaster(1795): Stopping service threads 2024-11-17T15:30:23,936 INFO [M:0;7a780d55532c:33923 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-17T15:30:23,936 INFO [M:0;7a780d55532c:33923 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T15:30:23,936 INFO [M:0;7a780d55532c:33923 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-17T15:30:23,936 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-17T15:30:23,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33923-0x101268c5ec30000, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-17T15:30:23,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33923-0x101268c5ec30000, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:30:23,937 DEBUG [M:0;7a780d55532c:33923 {}] zookeeper.ZKUtil(347): master:33923-0x101268c5ec30000, quorum=127.0.0.1:53267, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-17T15:30:23,937 WARN [M:0;7a780d55532c:33923 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-17T15:30:23,938 INFO [M:0;7a780d55532c:33923 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/.lastflushedseqids 2024-11-17T15:30:23,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35847 is added to blk_1073741908_1094 (size=130) 2024-11-17T15:30:23,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37341 is added to blk_1073741908_1094 (size=130) 2024-11-17T15:30:23,944 INFO [M:0;7a780d55532c:33923 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-17T15:30:23,944 INFO [M:0;7a780d55532c:33923 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-17T15:30:23,944 DEBUG [M:0;7a780d55532c:33923 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T15:30:23,944 INFO [M:0;7a780d55532c:33923 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:30:23,944 DEBUG [M:0;7a780d55532c:33923 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:30:23,944 DEBUG [M:0;7a780d55532c:33923 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T15:30:23,944 DEBUG [M:0;7a780d55532c:33923 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:30:23,944 INFO [M:0;7a780d55532c:33923 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-17T15:30:23,961 DEBUG [M:0;7a780d55532c:33923 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6a244940fc4a4be6a405bdc05d7672e6 is 82, key is hbase:meta,,1/info:regioninfo/1731857381591/Put/seqid=0 2024-11-17T15:30:23,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35847 is added to blk_1073741909_1095 (size=5672) 2024-11-17T15:30:23,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37341 is added to blk_1073741909_1095 (size=5672) 2024-11-17T15:30:23,966 INFO [M:0;7a780d55532c:33923 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6a244940fc4a4be6a405bdc05d7672e6 2024-11-17T15:30:23,987 DEBUG [M:0;7a780d55532c:33923 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5ec1bf71d9544e23a47e64c4f6faa207 is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731857382215/Put/seqid=0 2024-11-17T15:30:23,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37341 is added to blk_1073741910_1096 (size=6255) 2024-11-17T15:30:23,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35847 is added to blk_1073741910_1096 (size=6255) 2024-11-17T15:30:23,991 INFO [M:0;7a780d55532c:33923 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5ec1bf71d9544e23a47e64c4f6faa207 2024-11-17T15:30:23,996 INFO [M:0;7a780d55532c:33923 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5ec1bf71d9544e23a47e64c4f6faa207 2024-11-17T15:30:24,010 DEBUG [M:0;7a780d55532c:33923 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dd69fe1098ed49b8ba40facd6b82732a is 69, key is 7a780d55532c,35049,1731857381690/rs:state/1731857381749/Put/seqid=0 2024-11-17T15:30:24,014 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35847 is added to blk_1073741911_1097 (size=5224) 2024-11-17T15:30:24,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37341 is added to blk_1073741911_1097 (size=5224) 2024-11-17T15:30:24,015 INFO [M:0;7a780d55532c:33923 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dd69fe1098ed49b8ba40facd6b82732a 2024-11-17T15:30:24,033 DEBUG [M:0;7a780d55532c:33923 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3b45cfb9f01f4387b6e5f70496a96fd7 is 52, key is load_balancer_on/state:d/1731857381673/Put/seqid=0 2024-11-17T15:30:24,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38057-0x101268c5ec30001, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T15:30:24,034 INFO [RS:0;7a780d55532c:38057 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T15:30:24,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38057-0x101268c5ec30001, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T15:30:24,034 INFO [RS:0;7a780d55532c:38057 {}] regionserver.HRegionServer(1031): Exiting; stopping=7a780d55532c,38057,1731857380513; zookeeper connection closed. 
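The ZKWatcher DEBUG entries above record ZooKeeper events (NodeDeleted on /hbase/rs/..., NodeChildrenChanged on /hbase/rs, and state=Closed) as each region server deregisters. A minimal sketch of a standalone watcher that would log the same three fields is shown here; only the quorum address 127.0.0.1:53267 comes from the log, and the session timeout, sleep, and class name are assumptions for illustration.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RsNodeWatcherSketch implements Watcher {
  @Override
  public void process(WatchedEvent event) {
    // Same three fields the ZKWatcher lines report: event type, connection state, znode path.
    System.out.printf("Received ZooKeeper Event, type=%s, state=%s, path=%s%n",
        event.getType(), event.getState(), event.getPath());
  }

  public static void main(String[] args) throws Exception {
    // 127.0.0.1:53267 is the quorum shown in the log; 30s session timeout is arbitrary.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:53267", 30000, new RsNodeWatcherSketch());
    // Register a children watch on /hbase/rs, the znode whose changes are logged above.
    zk.getChildren("/hbase/rs", true);
    Thread.sleep(5000);
    zk.close();
  }
}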
2024-11-17T15:30:24,034 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5e0adc88 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5e0adc88 2024-11-17T15:30:24,034 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-17T15:30:24,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37341 is added to blk_1073741912_1098 (size=5056) 2024-11-17T15:30:24,038 INFO [M:0;7a780d55532c:33923 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3b45cfb9f01f4387b6e5f70496a96fd7 2024-11-17T15:30:24,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35847 is added to blk_1073741912_1098 (size=5056) 2024-11-17T15:30:24,044 DEBUG [M:0;7a780d55532c:33923 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6a244940fc4a4be6a405bdc05d7672e6 as hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6a244940fc4a4be6a405bdc05d7672e6 2024-11-17T15:30:24,049 INFO [M:0;7a780d55532c:33923 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6a244940fc4a4be6a405bdc05d7672e6, entries=8, sequenceid=60, filesize=5.5 K 2024-11-17T15:30:24,050 DEBUG [M:0;7a780d55532c:33923 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5ec1bf71d9544e23a47e64c4f6faa207 as hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5ec1bf71d9544e23a47e64c4f6faa207 2024-11-17T15:30:24,054 INFO [M:0;7a780d55532c:33923 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5ec1bf71d9544e23a47e64c4f6faa207 2024-11-17T15:30:24,054 INFO [M:0;7a780d55532c:33923 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5ec1bf71d9544e23a47e64c4f6faa207, entries=6, sequenceid=60, filesize=6.1 K 2024-11-17T15:30:24,055 DEBUG [M:0;7a780d55532c:33923 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dd69fe1098ed49b8ba40facd6b82732a as hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/dd69fe1098ed49b8ba40facd6b82732a 2024-11-17T15:30:24,060 INFO [M:0;7a780d55532c:33923 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/dd69fe1098ed49b8ba40facd6b82732a, entries=2, sequenceid=60, filesize=5.1 K 2024-11-17T15:30:24,061 DEBUG [M:0;7a780d55532c:33923 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3b45cfb9f01f4387b6e5f70496a96fd7 as hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3b45cfb9f01f4387b6e5f70496a96fd7 2024-11-17T15:30:24,066 INFO [M:0;7a780d55532c:33923 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3b45cfb9f01f4387b6e5f70496a96fd7, entries=1, sequenceid=60, filesize=4.9 K 2024-11-17T15:30:24,067 INFO [M:0;7a780d55532c:33923 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 123ms, sequenceid=60, compaction requested=false 2024-11-17T15:30:24,068 INFO [M:0;7a780d55532c:33923 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:30:24,068 DEBUG [M:0;7a780d55532c:33923 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731857423944Disabling compacts and flushes for region at 1731857423944Disabling writes for close at 1731857423944Obtaining lock to block concurrent updates at 1731857423944Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731857423944Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1731857423945 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731857423945Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731857423945Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731857423961 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731857423961Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731857423972 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731857423986 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731857423986Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731857423996 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731857424009 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731857424009Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731857424019 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731857424032 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731857424032Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4677b27f: reopening flushed file at 1731857424043 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@407d3948: reopening flushed file at 1731857424049 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f3d06a8: reopening flushed file at 1731857424055 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4cfe735c: reopening flushed file at 1731857424060 (+5 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 123ms, sequenceid=60, compaction requested=false at 1731857424067 (+7 ms)Writing region close event to WAL at 1731857424068 (+1 ms)Closed at 1731857424068 2024-11-17T15:30:24,069 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:24,069 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:24,069 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:24,069 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:24,069 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:24,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35847 is added to blk_1073741887_1070 (size=1045) 2024-11-17T15:30:24,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37341 is added to blk_1073741887_1070 (size=1045) 2024-11-17T15:30:24,297 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-17T15:30:24,309 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:24,309 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:24,309 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:24,310 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:24,310 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:24,310 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:24,313 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:24,315 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:24,729 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:30:24,734 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:25,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37341 is added to blk_1073741836_1012 (size=76) 2024-11-17T15:30:25,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37341 is added to blk_1073741825_1001 (size=7) 2024-11-17T15:30:25,063 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@803fd26 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-398811610-172.17.0.2-1731857379530:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:34067,null,null]) java.net.ConnectException: Call From 7a780d55532c/172.17.0.2 to localhost:38563 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] 
at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-17T15:30:25,730 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:25,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:25,840 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/WALs/7a780d55532c,33923,1731857380422/7a780d55532c%2C33923%2C1731857380422.1731857380683 to hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/oldWALs/7a780d55532c%2C33923%2C1731857380422.1731857380683 2024-11-17T15:30:25,843 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/MasterData/oldWALs/7a780d55532c%2C33923%2C1731857380422.1731857380683 to hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/oldWALs/7a780d55532c%2C33923%2C1731857380422.1731857380683$masterlocalwal$ 2024-11-17T15:30:25,843 INFO [M:0;7a780d55532c:33923 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-17T15:30:25,843 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-17T15:30:25,843 INFO [M:0;7a780d55532c:33923 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33923 2024-11-17T15:30:25,844 INFO [M:0;7a780d55532c:33923 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T15:30:25,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33923-0x101268c5ec30000, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T15:30:25,946 INFO [M:0;7a780d55532c:33923 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T15:30:25,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33923-0x101268c5ec30000, quorum=127.0.0.1:53267, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T15:30:25,948 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@74f37d73{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:30:25,949 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2e73b100{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T15:30:25,949 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T15:30:25,949 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7eb80544{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T15:30:25,949 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@43ad4e97{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/hadoop.log.dir/,STOPPED} 
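The recurring "Failed invocation" WARNs from RecoverLeaseFSUtils above show a reflective call to DistributedFileSystem#isFileClosed failing first with FileNotFoundException and later with "Filesystem closed" once the mini-cluster's client has shut down. The helper below is only a sketch of that probe pattern under the assumption that the utility looks the method up by name and treats any failure as "not closed yet"; the class and method names here are illustrative, not the HBase implementation.

import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileClosedProbeSketch {
  // Returns true only if the filesystem exposes isFileClosed(Path) and reports the file closed.
  static boolean probeIsFileClosed(FileSystem fs, Path wal) {
    try {
      // isFileClosed(Path) exists on DistributedFileSystem but not on every FileSystem,
      // hence the reflective lookup rather than a direct call.
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, wal);
    } catch (Exception e) {
      // Mirrors the WARNs in the log: reflective failures (missing file, closed client)
      // are reported and the caller simply keeps polling.
      System.err.println("Failed invocation for " + wal + ": " + e);
      return false;
    }
  }
}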
2024-11-17T15:30:25,950 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-17T15:30:25,950 WARN [BP-398811610-172.17.0.2-1731857379530 heartbeating to localhost/127.0.0.1:42915 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T15:30:25,950 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T15:30:25,950 WARN [BP-398811610-172.17.0.2-1731857379530 heartbeating to localhost/127.0.0.1:42915 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-398811610-172.17.0.2-1731857379530 (Datanode Uuid cad15bd0-c8d1-4c46-ba45-a08acfe53d98) service to localhost/127.0.0.1:42915 2024-11-17T15:30:25,950 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7002a744 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-398811610-172.17.0.2-1731857379530:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:34067,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:38563 , LocalHost:localPort 7a780d55532c/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-17T15:30:25,951 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7002a744 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-398811610-172.17.0.2-1731857379530:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:35847,null,null], DatanodeInfoWithStorage[127.0.0.1:34067,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-398811610-172.17.0.2-1731857379530 2024-11-17T15:30:25,951 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data3/current/BP-398811610-172.17.0.2-1731857379530 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:30:25,951 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7002a744 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-398811610-172.17.0.2-1731857379530:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:35847,null,null]) java.io.IOException: No block pool offer service for bpid=BP-398811610-172.17.0.2-1731857379530 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:25,951 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7002a744 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-398811610-172.17.0.2-1731857379530:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:34067,null,null]) java.io.IOException: No block pool offer service for bpid=BP-398811610-172.17.0.2-1731857379530 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:25,951 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data4/current/BP-398811610-172.17.0.2-1731857379530 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:30:25,951 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7002a744 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-398811610-172.17.0.2-1731857379530:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:35847,null,null], DatanodeInfoWithStorage[127.0.0.1:34067,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-398811610-172.17.0.2-1731857379530:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:35847,null,null], DatanodeInfoWithStorage[127.0.0.1:34067,null,null]] 2024-11-17T15:30:25,951 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T15:30:25,953 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3301cee2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:30:25,954 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4e6d719c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T15:30:25,954 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T15:30:25,954 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a86ef00{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T15:30:25,954 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@9cb12d7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/hadoop.log.dir/,STOPPED} 2024-11-17T15:30:25,956 WARN [BP-398811610-172.17.0.2-1731857379530 heartbeating to localhost/127.0.0.1:42915 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T15:30:25,956 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-17T15:30:25,956 WARN [BP-398811610-172.17.0.2-1731857379530 heartbeating to localhost/127.0.0.1:42915 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-398811610-172.17.0.2-1731857379530 (Datanode Uuid 42b6b50f-ae35-42e2-bf0f-012e79f43601) service to localhost/127.0.0.1:42915 2024-11-17T15:30:25,956 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T15:30:25,956 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data7/current/BP-398811610-172.17.0.2-1731857379530 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:30:25,956 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/cluster_2ec0f2f9-a631-5bdd-ff0d-4d5c6ea1a4ca/data/data8/current/BP-398811610-172.17.0.2-1731857379530 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:30:25,957 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T15:30:25,962 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@165c5bf1{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T15:30:25,963 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@73e293ca{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T15:30:25,963 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T15:30:25,963 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6cef6566{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T15:30:25,963 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@302502f5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/hadoop.log.dir/,STOPPED} 2024-11-17T15:30:25,971 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-17T15:30:26,000 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-17T15:30:26,008 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: 
regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=154 (was 78) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:42915 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007f923cbf4ff8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42915 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42915 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:42915 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42915 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007f923cbf4ff8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:42915 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:42915 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42915 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45951 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:42915 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42915 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:45951 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:42915 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=452 (was 404) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=188 (was 187) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3820 (was 3802) - AvailableMemoryMB LEAK? 
- 2024-11-17T15:30:26,015 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=154, OpenFileDescriptor=452, MaxFileDescriptor=1048576, SystemLoadAverage=188, ProcessCount=11, AvailableMemoryMB=3820 2024-11-17T15:30:26,016 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-17T15:30:26,016 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/hadoop.log.dir so I do NOT create it in target/test-data/466a8a68-b03c-5395-5402-4e756ae99751 2024-11-17T15:30:26,016 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d47f383f-f337-07a8-3c31-474a1ed8547b/hadoop.tmp.dir so I do NOT create it in target/test-data/466a8a68-b03c-5395-5402-4e756ae99751 2024-11-17T15:30:26,016 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/cluster_51169b72-0b57-551e-5b79-ff6d39288205, deleteOnExit=true 2024-11-17T15:30:26,016 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-17T15:30:26,016 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/test.cache.data in system properties and HBase conf 2024-11-17T15:30:26,016 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/hadoop.tmp.dir in system properties and HBase conf 2024-11-17T15:30:26,016 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/hadoop.log.dir in system properties and HBase conf 2024-11-17T15:30:26,016 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-17T15:30:26,016 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-17T15:30:26,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-17T15:30:26,017 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-17T15:30:26,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-17T15:30:26,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-17T15:30:26,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-17T15:30:26,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T15:30:26,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-17T15:30:26,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-17T15:30:26,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T15:30:26,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T15:30:26,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-17T15:30:26,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/nfs.dump.dir in system properties and HBase conf 2024-11-17T15:30:26,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/java.io.tmpdir in system properties and HBase conf 2024-11-17T15:30:26,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T15:30:26,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-17T15:30:26,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-17T15:30:26,031 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T15:30:26,103 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:30:26,108 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T15:30:26,109 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T15:30:26,109 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T15:30:26,109 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T15:30:26,113 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:30:26,113 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8df6f39{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/hadoop.log.dir/,AVAILABLE} 2024-11-17T15:30:26,113 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35d68916{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T15:30:26,236 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@40ce61ea{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/java.io.tmpdir/jetty-localhost-41121-hadoop-hdfs-3_4_1-tests_jar-_-any-14888932988923157148/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T15:30:26,236 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3734ddc7{HTTP/1.1, (http/1.1)}{localhost:41121} 2024-11-17T15:30:26,236 INFO [Time-limited test {}] server.Server(415): Started @150158ms 2024-11-17T15:30:26,249 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T15:30:26,317 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:30:26,319 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T15:30:26,320 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T15:30:26,320 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T15:30:26,320 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T15:30:26,321 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3b3376d9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/hadoop.log.dir/,AVAILABLE} 2024-11-17T15:30:26,321 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7f6bebc0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T15:30:26,436 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@131e974b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/java.io.tmpdir/jetty-localhost-36047-hadoop-hdfs-3_4_1-tests_jar-_-any-15475039911867757679/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:30:26,436 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5aff23e7{HTTP/1.1, (http/1.1)}{localhost:36047} 2024-11-17T15:30:26,437 INFO [Time-limited test {}] server.Server(415): Started @150359ms 2024-11-17T15:30:26,438 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T15:30:26,466 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:30:26,469 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T15:30:26,470 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T15:30:26,470 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T15:30:26,470 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T15:30:26,470 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21998c84{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/hadoop.log.dir/,AVAILABLE} 2024-11-17T15:30:26,471 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@794cb94f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T15:30:26,547 WARN [Thread-1200 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/cluster_51169b72-0b57-551e-5b79-ff6d39288205/data/data1/current/BP-1061960288-172.17.0.2-1731857426049/current, will proceed with Du for space computation calculation, 2024-11-17T15:30:26,547 WARN [Thread-1201 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/cluster_51169b72-0b57-551e-5b79-ff6d39288205/data/data2/current/BP-1061960288-172.17.0.2-1731857426049/current, will proceed with Du for space computation calculation, 2024-11-17T15:30:26,563 WARN [Thread-1179 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T15:30:26,566 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7f913c4e67a58e34 with lease ID 0x7016e82585fa58f0: Processing first storage report for DS-a395277a-0be1-49dc-816c-4f82b4a2dbcc from datanode DatanodeRegistration(127.0.0.1:46793, datanodeUuid=447dae23-b803-487e-bf73-7d03e699e225, infoPort=38115, infoSecurePort=0, ipcPort=33021, storageInfo=lv=-57;cid=testClusterID;nsid=1457268781;c=1731857426049) 2024-11-17T15:30:26,566 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7f913c4e67a58e34 with lease ID 0x7016e82585fa58f0: from storage DS-a395277a-0be1-49dc-816c-4f82b4a2dbcc node DatanodeRegistration(127.0.0.1:46793, datanodeUuid=447dae23-b803-487e-bf73-7d03e699e225, infoPort=38115, infoSecurePort=0, ipcPort=33021, storageInfo=lv=-57;cid=testClusterID;nsid=1457268781;c=1731857426049), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-17T15:30:26,566 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7f913c4e67a58e34 with lease ID 0x7016e82585fa58f0: Processing first storage report for DS-b7dc572d-11c4-4046-8f20-dc70f33bf181 from datanode DatanodeRegistration(127.0.0.1:46793, datanodeUuid=447dae23-b803-487e-bf73-7d03e699e225, infoPort=38115, infoSecurePort=0, ipcPort=33021, storageInfo=lv=-57;cid=testClusterID;nsid=1457268781;c=1731857426049) 2024-11-17T15:30:26,566 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7f913c4e67a58e34 with lease ID 0x7016e82585fa58f0: from storage DS-b7dc572d-11c4-4046-8f20-dc70f33bf181 node DatanodeRegistration(127.0.0.1:46793, datanodeUuid=447dae23-b803-487e-bf73-7d03e699e225, infoPort=38115, infoSecurePort=0, ipcPort=33021, storageInfo=lv=-57;cid=testClusterID;nsid=1457268781;c=1731857426049), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:30:26,603 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@aae1781{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/java.io.tmpdir/jetty-localhost-41649-hadoop-hdfs-3_4_1-tests_jar-_-any-2587608140783502199/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:30:26,604 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4ef0fe27{HTTP/1.1, (http/1.1)}{localhost:41649} 2024-11-17T15:30:26,604 INFO [Time-limited test {}] server.Server(415): Started @150526ms 2024-11-17T15:30:26,605 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
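[Editor's aside] The records from the 15:30:26,015 ResourceChecker "before:" entry onward show HBaseTestingUtil bringing up a fresh minicluster for testLogRollOnPipelineRestart with the logged topology (numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1), followed by DFS startup and the first datanode's block reports. As an illustrative sketch only — not the actual TestLogRolling setup code, and assuming the builder-style StartMiniClusterOption API whose fields appear in the log line above — a test could request that same topology like this:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        // Sketch: bring up the topology logged above
        // (1 master, 1 region server, 2 HDFS datanodes, 1 ZK server).
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)   // two datanodes, matching the block reports in the log
            .numZkServers(1)
            .build();
        util.startMiniCluster(option);
        try {
          // ... exercise WAL rolling / pipeline restart against the cluster here ...
        } finally {
          // Tear everything down; this is the call that produces the
          // "Shutdown MiniZK cluster" / "Minicluster is down" records
          // seen at the end of the previous test above.
          util.shutdownMiniCluster();
        }
      }
    }

The paired ResourceChecker "before:"/"after:" records that bracket each test (Thread, OpenFileDescriptor, SystemLoadAverage, AvailableMemoryMB, each with its "(was N)" baseline) are what drive the "Thread LEAK?" style warnings reported after the preceding test.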
2024-11-17T15:30:26,709 WARN [Thread-1226 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/cluster_51169b72-0b57-551e-5b79-ff6d39288205/data/data3/current/BP-1061960288-172.17.0.2-1731857426049/current, will proceed with Du for space computation calculation, 2024-11-17T15:30:26,709 WARN [Thread-1227 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/cluster_51169b72-0b57-551e-5b79-ff6d39288205/data/data4/current/BP-1061960288-172.17.0.2-1731857426049/current, will proceed with Du for space computation calculation, 2024-11-17T15:30:26,730 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:26,731 WARN [Thread-1215 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T15:30:26,734 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x55edf21b90c29453 with lease ID 0x7016e82585fa58f1: Processing first storage report for DS-7e13c295-e1da-44cc-b769-460087a89f8d from datanode DatanodeRegistration(127.0.0.1:32795, datanodeUuid=12f6a0f3-ac45-417e-acc2-7b19ff10e633, infoPort=34157, infoSecurePort=0, ipcPort=42535, storageInfo=lv=-57;cid=testClusterID;nsid=1457268781;c=1731857426049) 2024-11-17T15:30:26,734 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x55edf21b90c29453 with lease ID 0x7016e82585fa58f1: from storage DS-7e13c295-e1da-44cc-b769-460087a89f8d node DatanodeRegistration(127.0.0.1:32795, datanodeUuid=12f6a0f3-ac45-417e-acc2-7b19ff10e633, infoPort=34157, infoSecurePort=0, ipcPort=42535, storageInfo=lv=-57;cid=testClusterID;nsid=1457268781;c=1731857426049), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:30:26,734 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x55edf21b90c29453 with lease ID 0x7016e82585fa58f1: Processing first storage report for DS-eaed7fcb-5dba-4f52-a703-df65103416d1 from datanode DatanodeRegistration(127.0.0.1:32795, datanodeUuid=12f6a0f3-ac45-417e-acc2-7b19ff10e633, infoPort=34157, infoSecurePort=0, ipcPort=42535, storageInfo=lv=-57;cid=testClusterID;nsid=1457268781;c=1731857426049) 2024-11-17T15:30:26,734 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x55edf21b90c29453 with lease ID 0x7016e82585fa58f1: from storage DS-eaed7fcb-5dba-4f52-a703-df65103416d1 node DatanodeRegistration(127.0.0.1:32795, datanodeUuid=12f6a0f3-ac45-417e-acc2-7b19ff10e633, infoPort=34157, infoSecurePort=0, ipcPort=42535, storageInfo=lv=-57;cid=testClusterID;nsid=1457268781;c=1731857426049), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:30:26,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:26,829 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751 2024-11-17T15:30:26,832 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/cluster_51169b72-0b57-551e-5b79-ff6d39288205/zookeeper_0, clientPort=56215, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/cluster_51169b72-0b57-551e-5b79-ff6d39288205/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/cluster_51169b72-0b57-551e-5b79-ff6d39288205/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-17T15:30:26,833 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56215 2024-11-17T15:30:26,833 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:30:26,834 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:30:26,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32795 is added to blk_1073741825_1001 (size=7) 2024-11-17T15:30:26,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46793 is added to blk_1073741825_1001 (size=7) 2024-11-17T15:30:26,844 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc with version=8 2024-11-17T15:30:26,844 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/hbase-staging 2024-11-17T15:30:26,845 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7a780d55532c:0 server-side Connection retries=45 2024-11-17T15:30:26,846 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T15:30:26,846 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T15:30:26,846 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T15:30:26,846 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T15:30:26,846 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T15:30:26,846 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-17T15:30:26,846 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T15:30:26,847 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33399 2024-11-17T15:30:26,848 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33399 connecting to ZooKeeper ensemble=127.0.0.1:56215 2024-11-17T15:30:26,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:333990x0, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T15:30:26,854 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33399-0x101268d141f0000 connected 2024-11-17T15:30:26,871 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:30:26,873 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:30:26,874 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33399-0x101268d141f0000, quorum=127.0.0.1:56215, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T15:30:26,875 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc, hbase.cluster.distributed=false 2024-11-17T15:30:26,876 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33399-0x101268d141f0000, quorum=127.0.0.1:56215, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T15:30:26,880 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33399 2024-11-17T15:30:26,881 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33399 2024-11-17T15:30:26,881 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33399 2024-11-17T15:30:26,881 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33399 2024-11-17T15:30:26,881 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33399 2024-11-17T15:30:26,897 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7a780d55532c:0 server-side Connection retries=45 2024-11-17T15:30:26,897 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T15:30:26,897 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T15:30:26,897 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T15:30:26,897 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T15:30:26,897 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T15:30:26,897 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-17T15:30:26,897 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T15:30:26,898 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45235 2024-11-17T15:30:26,899 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45235 connecting to ZooKeeper ensemble=127.0.0.1:56215 2024-11-17T15:30:26,899 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:30:26,901 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:30:26,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:452350x0, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T15:30:26,906 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:452350x0, quorum=127.0.0.1:56215, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T15:30:26,906 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45235-0x101268d141f0001 connected 2024-11-17T15:30:26,906 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-17T15:30:26,907 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with 
cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-17T15:30:26,907 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45235-0x101268d141f0001, quorum=127.0.0.1:56215, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-17T15:30:26,908 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45235-0x101268d141f0001, quorum=127.0.0.1:56215, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T15:30:26,910 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45235 2024-11-17T15:30:26,911 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45235 2024-11-17T15:30:26,912 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45235 2024-11-17T15:30:26,914 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45235 2024-11-17T15:30:26,915 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45235 2024-11-17T15:30:26,927 DEBUG [M:0;7a780d55532c:33399 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7a780d55532c:33399 2024-11-17T15:30:26,927 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7a780d55532c,33399,1731857426845 2024-11-17T15:30:26,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33399-0x101268d141f0000, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T15:30:26,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45235-0x101268d141f0001, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T15:30:26,931 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33399-0x101268d141f0000, quorum=127.0.0.1:56215, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7a780d55532c,33399,1731857426845 2024-11-17T15:30:26,932 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45235-0x101268d141f0001, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-17T15:30:26,932 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33399-0x101268d141f0000, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:30:26,932 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45235-0x101268d141f0001, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:30:26,933 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33399-0x101268d141f0000, quorum=127.0.0.1:56215, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-17T15:30:26,933 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for 
/hbase/backup-masters/7a780d55532c,33399,1731857426845 from backup master directory 2024-11-17T15:30:26,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33399-0x101268d141f0000, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7a780d55532c,33399,1731857426845 2024-11-17T15:30:26,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45235-0x101268d141f0001, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T15:30:26,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33399-0x101268d141f0000, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T15:30:26,935 WARN [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-17T15:30:26,935 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7a780d55532c,33399,1731857426845 2024-11-17T15:30:26,939 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/hbase.id] with ID: e8fe7ad3-639a-468f-a88d-ea7629c61bbf 2024-11-17T15:30:26,939 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/.tmp/hbase.id 2024-11-17T15:30:26,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46793 is added to blk_1073741826_1002 (size=42) 2024-11-17T15:30:26,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32795 is added to blk_1073741826_1002 (size=42) 2024-11-17T15:30:26,945 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/.tmp/hbase.id]:[hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/hbase.id] 2024-11-17T15:30:26,957 INFO [master/7a780d55532c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:30:26,957 INFO [master/7a780d55532c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-17T15:30:26,958 INFO [master/7a780d55532c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
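[Annotation] The FSUtils messages above first write the cluster ID to a temporary location (.tmp/hbase.id) and only then move it to its final hbase.id path, so no reader ever observes a half-written file. Below is a minimal sketch of that write-temp-then-rename step against the standard Hadoop FileSystem API; the class and method names (ClusterIdWriter, writeClusterId) are invented for illustration and this is not HBase's actual FSUtils code.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdWriter {
    /** Write clusterId under rootDir/.tmp first, then rename it into place. */
    static void writeClusterId(FileSystem fs, Path rootDir, String clusterId) throws IOException {
        Path idFile = new Path(rootDir, "hbase.id");
        Path tmpFile = new Path(new Path(rootDir, ".tmp"), "hbase.id");
        try (FSDataOutputStream out = fs.create(tmpFile, true)) {
            out.write(clusterId.getBytes(StandardCharsets.UTF_8)); // the full content lands in the temp file
        }
        // The move is the last step, so the target path only ever holds a complete file.
        if (!fs.rename(tmpFile, idFile)) {
            throw new IOException("rename " + tmpFile + " -> " + idFile + " failed");
        }
    }
}

On HDFS a rename within the namespace is atomic, which is what makes this two-step publish pattern safe.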
2024-11-17T15:30:26,960 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33399-0x101268d141f0000, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:30:26,960 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45235-0x101268d141f0001, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:30:26,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32795 is added to blk_1073741827_1003 (size=196) 2024-11-17T15:30:26,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46793 is added to blk_1073741827_1003 (size=196) 2024-11-17T15:30:26,967 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T15:30:26,967 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-17T15:30:26,968 INFO [master/7a780d55532c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T15:30:26,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46793 is added to blk_1073741828_1004 (size=1189) 2024-11-17T15:30:26,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32795 is added to blk_1073741828_1004 (size=1189) 2024-11-17T15:30:26,978 INFO [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/data/master/store 2024-11-17T15:30:26,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32795 is added to blk_1073741829_1005 (size=34) 2024-11-17T15:30:26,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46793 is added to blk_1073741829_1005 (size=34) 2024-11-17T15:30:26,985 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:30:26,985 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T15:30:26,985 INFO [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:30:26,985 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:30:26,985 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T15:30:26,985 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:30:26,985 INFO [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
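[Annotation] The 'master:store' descriptor printed above is created internally by the master, but the same column-family settings shown for its 'info' family (3 versions, ROW_INDEX_V1 encoding, ROWCOL bloom filter, in-memory, 8 KB blocks) can be expressed with the HBase 2.x+ client builder API. The sketch below is written under that assumption; the table name "example:store" is a stand-in, since master:store is not a user-creatable table.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
    public static void main(String[] args) {
        // Mirrors the 'info' family attributes shown in the descriptor above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .build();
        // "example:store" is a placeholder name; the real region lives in the reserved master namespace.
        TableDescriptor table = TableDescriptorBuilder.newBuilder(TableName.valueOf("example:store"))
                .setColumnFamily(info)
                .build();
        System.out.println(table);
    }
}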
2024-11-17T15:30:26,985 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731857426985Disabling compacts and flushes for region at 1731857426985Disabling writes for close at 1731857426985Writing region close event to WAL at 1731857426985Closed at 1731857426985 2024-11-17T15:30:26,986 WARN [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/data/master/store/.initializing 2024-11-17T15:30:26,986 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/WALs/7a780d55532c,33399,1731857426845 2024-11-17T15:30:26,989 INFO [master/7a780d55532c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a780d55532c%2C33399%2C1731857426845, suffix=, logDir=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/WALs/7a780d55532c,33399,1731857426845, archiveDir=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/oldWALs, maxLogs=10 2024-11-17T15:30:26,989 INFO [master/7a780d55532c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C33399%2C1731857426845.1731857426989 2024-11-17T15:30:26,993 INFO [master/7a780d55532c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/WALs/7a780d55532c,33399,1731857426845/7a780d55532c%2C33399%2C1731857426845.1731857426989 2024-11-17T15:30:26,997 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38115:38115),(127.0.0.1/127.0.0.1:34157:34157)] 2024-11-17T15:30:26,997 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-17T15:30:26,997 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:30:26,998 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:30:26,998 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:30:26,999 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:30:27,000 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-17T15:30:27,000 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:30:27,001 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:30:27,001 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:30:27,002 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-17T15:30:27,002 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:30:27,002 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T15:30:27,002 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:30:27,003 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-17T15:30:27,003 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:30:27,004 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T15:30:27,004 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:30:27,005 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-17T15:30:27,005 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:30:27,005 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T15:30:27,006 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:30:27,006 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:30:27,006 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:30:27,008 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:30:27,008 DEBUG [master/7a780d55532c:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:30:27,008 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-17T15:30:27,009 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:30:27,011 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T15:30:27,011 INFO [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=731728, jitterRate=-0.06956073641777039}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-17T15:30:27,012 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731857426998Initializing all the Stores at 1731857426998Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857426998Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857426999 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857426999Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857426999Cleaning up temporary data from old regions at 1731857427008 (+9 ms)Region opened successfully at 1731857427012 (+4 ms) 2024-11-17T15:30:27,012 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-17T15:30:27,015 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d4742aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7a780d55532c/172.17.0.2:0 2024-11-17T15:30:27,016 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-17T15:30:27,016 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-17T15:30:27,016 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-17T15:30:27,016 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-17T15:30:27,017 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-17T15:30:27,017 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-17T15:30:27,017 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-17T15:30:27,019 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-17T15:30:27,020 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33399-0x101268d141f0000, quorum=127.0.0.1:56215, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-17T15:30:27,023 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-17T15:30:27,023 INFO [master/7a780d55532c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-17T15:30:27,024 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33399-0x101268d141f0000, quorum=127.0.0.1:56215, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-17T15:30:27,025 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-17T15:30:27,025 INFO [master/7a780d55532c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-17T15:30:27,026 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33399-0x101268d141f0000, quorum=127.0.0.1:56215, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-17T15:30:27,027 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-17T15:30:27,028 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33399-0x101268d141f0000, quorum=127.0.0.1:56215, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-17T15:30:27,030 DEBUG 
[master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-17T15:30:27,032 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33399-0x101268d141f0000, quorum=127.0.0.1:56215, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-17T15:30:27,034 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-17T15:30:27,036 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33399-0x101268d141f0000, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T15:30:27,036 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45235-0x101268d141f0001, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T15:30:27,036 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45235-0x101268d141f0001, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:30:27,036 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33399-0x101268d141f0000, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:30:27,036 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7a780d55532c,33399,1731857426845, sessionid=0x101268d141f0000, setting cluster-up flag (Was=false) 2024-11-17T15:30:27,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33399-0x101268d141f0000, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:30:27,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45235-0x101268d141f0001, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:30:27,048 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-17T15:30:27,049 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7a780d55532c,33399,1731857426845 2024-11-17T15:30:27,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33399-0x101268d141f0000, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:30:27,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45235-0x101268d141f0001, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:30:27,059 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-17T15:30:27,060 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7a780d55532c,33399,1731857426845 2024-11-17T15:30:27,061 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-17T15:30:27,063 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-17T15:30:27,064 INFO [master/7a780d55532c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-17T15:30:27,064 INFO [master/7a780d55532c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-17T15:30:27,064 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7a780d55532c,33399,1731857426845 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-17T15:30:27,065 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7a780d55532c:0, corePoolSize=5, maxPoolSize=5 2024-11-17T15:30:27,065 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7a780d55532c:0, corePoolSize=5, maxPoolSize=5 2024-11-17T15:30:27,065 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7a780d55532c:0, corePoolSize=5, maxPoolSize=5 2024-11-17T15:30:27,065 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7a780d55532c:0, corePoolSize=5, maxPoolSize=5 2024-11-17T15:30:27,065 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7a780d55532c:0, corePoolSize=10, maxPoolSize=10 2024-11-17T15:30:27,065 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:30:27,065 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7a780d55532c:0, corePoolSize=2, maxPoolSize=2 2024-11-17T15:30:27,066 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7a780d55532c:0, corePoolSize=1, 
maxPoolSize=1 2024-11-17T15:30:27,067 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T15:30:27,067 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-17T15:30:27,068 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:30:27,068 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-17T15:30:27,072 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731857457072 2024-11-17T15:30:27,073 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-17T15:30:27,073 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-17T15:30:27,073 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-17T15:30:27,073 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-17T15:30:27,073 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-17T15:30:27,073 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-17T15:30:27,073 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-17T15:30:27,074 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-17T15:30:27,074 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-17T15:30:27,074 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-17T15:30:27,075 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-17T15:30:27,076 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-17T15:30:27,076 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.large.0-1731857427076,5,FailOnTimeoutGroup] 2024-11-17T15:30:27,077 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.small.0-1731857427076,5,FailOnTimeoutGroup] 2024-11-17T15:30:27,077 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-17T15:30:27,077 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-17T15:30:27,077 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-17T15:30:27,077 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
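The chore entries above show the master starting its LogsCleaner and HFileCleaner with the default 600000 ms period. A minimal sketch of how those intervals could be tuned before starting a cluster; the property keys are assumptions based on recent HBase releases and are not taken from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CleanerTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed key: period of the cleaner chores shown above (LogsCleaner/HFileCleaner), in ms.
    conf.setInt("hbase.master.cleaner.interval", 600_000);
    // Assumed key: how long archived WALs are retained before TimeToLiveLogCleaner removes them, in ms.
    conf.setLong("hbase.master.logcleaner.ttl", 600_000L);
    System.out.println("cleaner interval = " + conf.get("hbase.master.cleaner.interval"));
  }
}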
2024-11-17T15:30:27,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46793 is added to blk_1073741831_1007 (size=1321) 2024-11-17T15:30:27,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32795 is added to blk_1073741831_1007 (size=1321) 2024-11-17T15:30:27,080 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-17T15:30:27,081 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc 2024-11-17T15:30:27,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32795 is added to blk_1073741832_1008 (size=32) 2024-11-17T15:30:27,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46793 is added to blk_1073741832_1008 (size=32) 2024-11-17T15:30:27,091 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:30:27,092 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T15:30:27,093 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T15:30:27,093 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:30:27,094 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:30:27,094 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T15:30:27,095 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T15:30:27,095 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:30:27,096 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:30:27,096 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T15:30:27,097 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T15:30:27,097 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:30:27,098 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:30:27,098 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T15:30:27,099 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T15:30:27,099 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:30:27,099 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:30:27,099 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T15:30:27,100 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/data/hbase/meta/1588230740 2024-11-17T15:30:27,100 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/data/hbase/meta/1588230740 2024-11-17T15:30:27,102 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T15:30:27,102 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T15:30:27,102 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
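The FlushLargeStoresPolicy entry above falls back to memStoreFlushHeapSize divided by the number of families (16.0 M) because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the hbase:meta table descriptor. A minimal sketch of setting that value on an ordinary table descriptor; the table name "demo" is hypothetical, and only the property key is quoted from the log:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushLowerBoundSketch {
  public static void main(String[] args) {
    // The key is the one the log reports as missing; the 16 MB value mirrors the fallback it computed.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))                       // hypothetical table
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                  String.valueOf(16L * 1024 * 1024))
        .build();
    System.out.println(td.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
  }
}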
2024-11-17T15:30:27,103 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T15:30:27,105 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T15:30:27,106 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=871808, jitterRate=0.10856206715106964}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T15:30:27,106 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731857427091Initializing all the Stores at 1731857427092 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857427092Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857427092Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857427092Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857427092Cleaning up temporary data from old regions at 1731857427102 (+10 ms)Region opened successfully at 1731857427106 (+4 ms) 2024-11-17T15:30:27,106 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T15:30:27,106 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T15:30:27,106 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T15:30:27,106 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T15:30:27,106 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T15:30:27,107 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T15:30:27,107 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731857427106Disabling compacts and flushes for region at 1731857427106Disabling writes for close at 1731857427106Writing region close 
event to WAL at 1731857427107 (+1 ms)Closed at 1731857427107 2024-11-17T15:30:27,108 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T15:30:27,108 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-17T15:30:27,108 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-17T15:30:27,110 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T15:30:27,111 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-17T15:30:27,116 INFO [RS:0;7a780d55532c:45235 {}] regionserver.HRegionServer(746): ClusterId : e8fe7ad3-639a-468f-a88d-ea7629c61bbf 2024-11-17T15:30:27,117 DEBUG [RS:0;7a780d55532c:45235 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-17T15:30:27,120 DEBUG [RS:0;7a780d55532c:45235 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-17T15:30:27,120 DEBUG [RS:0;7a780d55532c:45235 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-17T15:30:27,123 DEBUG [RS:0;7a780d55532c:45235 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-17T15:30:27,123 DEBUG [RS:0;7a780d55532c:45235 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@634c11af, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7a780d55532c/172.17.0.2:0 2024-11-17T15:30:27,136 DEBUG [RS:0;7a780d55532c:45235 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7a780d55532c:45235 2024-11-17T15:30:27,136 INFO [RS:0;7a780d55532c:45235 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-17T15:30:27,136 INFO [RS:0;7a780d55532c:45235 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-17T15:30:27,136 DEBUG [RS:0;7a780d55532c:45235 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-17T15:30:27,137 INFO [RS:0;7a780d55532c:45235 {}] regionserver.HRegionServer(2659): reportForDuty to master=7a780d55532c,33399,1731857426845 with port=45235, startcode=1731857426896 2024-11-17T15:30:27,137 DEBUG [RS:0;7a780d55532c:45235 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-17T15:30:27,139 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32897, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-17T15:30:27,140 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33399 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7a780d55532c,45235,1731857426896 2024-11-17T15:30:27,140 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33399 {}] master.ServerManager(517): Registering regionserver=7a780d55532c,45235,1731857426896 2024-11-17T15:30:27,142 DEBUG [RS:0;7a780d55532c:45235 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc 2024-11-17T15:30:27,142 DEBUG [RS:0;7a780d55532c:45235 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43551 2024-11-17T15:30:27,142 DEBUG [RS:0;7a780d55532c:45235 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-17T15:30:27,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33399-0x101268d141f0000, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T15:30:27,145 DEBUG [RS:0;7a780d55532c:45235 {}] zookeeper.ZKUtil(111): regionserver:45235-0x101268d141f0001, quorum=127.0.0.1:56215, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7a780d55532c,45235,1731857426896 2024-11-17T15:30:27,145 WARN [RS:0;7a780d55532c:45235 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-17T15:30:27,145 INFO [RS:0;7a780d55532c:45235 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T15:30:27,145 DEBUG [RS:0;7a780d55532c:45235 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896 2024-11-17T15:30:27,145 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7a780d55532c,45235,1731857426896] 2024-11-17T15:30:27,149 INFO [RS:0;7a780d55532c:45235 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-17T15:30:27,151 INFO [RS:0;7a780d55532c:45235 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-17T15:30:27,151 INFO [RS:0;7a780d55532c:45235 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-17T15:30:27,151 INFO [RS:0;7a780d55532c:45235 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
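At this point the region server has reported for duty and been registered by the master's ServerManager. A minimal sketch of observing that registration from a client using the public Admin API; the ZooKeeper quorum value is the one printed in the log, and the rest is standard hbase-client usage:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ListRegionServers {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Quorum as reported earlier in this log; adjust for a real deployment.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1:56215");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Live servers as tracked by the master after the registration shown above.
      for (ServerName sn : admin.getClusterMetrics().getLiveServerMetrics().keySet()) {
        System.out.println("live region server: " + sn);
      }
    }
  }
}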
2024-11-17T15:30:27,152 INFO [RS:0;7a780d55532c:45235 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-17T15:30:27,153 INFO [RS:0;7a780d55532c:45235 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-17T15:30:27,153 INFO [RS:0;7a780d55532c:45235 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-17T15:30:27,153 DEBUG [RS:0;7a780d55532c:45235 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:30:27,153 DEBUG [RS:0;7a780d55532c:45235 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:30:27,153 DEBUG [RS:0;7a780d55532c:45235 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:30:27,153 DEBUG [RS:0;7a780d55532c:45235 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:30:27,153 DEBUG [RS:0;7a780d55532c:45235 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:30:27,153 DEBUG [RS:0;7a780d55532c:45235 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7a780d55532c:0, corePoolSize=2, maxPoolSize=2 2024-11-17T15:30:27,153 DEBUG [RS:0;7a780d55532c:45235 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:30:27,153 DEBUG [RS:0;7a780d55532c:45235 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:30:27,153 DEBUG [RS:0;7a780d55532c:45235 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:30:27,153 DEBUG [RS:0;7a780d55532c:45235 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:30:27,153 DEBUG [RS:0;7a780d55532c:45235 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:30:27,154 DEBUG [RS:0;7a780d55532c:45235 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:30:27,154 DEBUG [RS:0;7a780d55532c:45235 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7a780d55532c:0, corePoolSize=3, maxPoolSize=3 2024-11-17T15:30:27,154 DEBUG [RS:0;7a780d55532c:45235 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0, corePoolSize=3, maxPoolSize=3 2024-11-17T15:30:27,156 INFO [RS:0;7a780d55532c:45235 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-17T15:30:27,156 INFO [RS:0;7a780d55532c:45235 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-17T15:30:27,156 INFO [RS:0;7a780d55532c:45235 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T15:30:27,156 INFO [RS:0;7a780d55532c:45235 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-17T15:30:27,156 INFO [RS:0;7a780d55532c:45235 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-17T15:30:27,156 INFO [RS:0;7a780d55532c:45235 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,45235,1731857426896-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T15:30:27,171 INFO [RS:0;7a780d55532c:45235 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-17T15:30:27,172 INFO [RS:0;7a780d55532c:45235 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,45235,1731857426896-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T15:30:27,172 INFO [RS:0;7a780d55532c:45235 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:30:27,172 INFO [RS:0;7a780d55532c:45235 {}] regionserver.Replication(171): 7a780d55532c,45235,1731857426896 started 2024-11-17T15:30:27,186 INFO [RS:0;7a780d55532c:45235 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:30:27,186 INFO [RS:0;7a780d55532c:45235 {}] regionserver.HRegionServer(1482): Serving as 7a780d55532c,45235,1731857426896, RpcServer on 7a780d55532c/172.17.0.2:45235, sessionid=0x101268d141f0001 2024-11-17T15:30:27,186 DEBUG [RS:0;7a780d55532c:45235 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-17T15:30:27,186 DEBUG [RS:0;7a780d55532c:45235 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7a780d55532c,45235,1731857426896 2024-11-17T15:30:27,186 DEBUG [RS:0;7a780d55532c:45235 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a780d55532c,45235,1731857426896' 2024-11-17T15:30:27,186 DEBUG [RS:0;7a780d55532c:45235 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-17T15:30:27,187 DEBUG [RS:0;7a780d55532c:45235 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-17T15:30:27,187 DEBUG [RS:0;7a780d55532c:45235 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-17T15:30:27,187 DEBUG [RS:0;7a780d55532c:45235 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-17T15:30:27,187 DEBUG [RS:0;7a780d55532c:45235 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7a780d55532c,45235,1731857426896 2024-11-17T15:30:27,187 DEBUG [RS:0;7a780d55532c:45235 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a780d55532c,45235,1731857426896' 2024-11-17T15:30:27,187 DEBUG [RS:0;7a780d55532c:45235 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-17T15:30:27,187 DEBUG 
[RS:0;7a780d55532c:45235 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-17T15:30:27,188 DEBUG [RS:0;7a780d55532c:45235 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-17T15:30:27,188 INFO [RS:0;7a780d55532c:45235 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-17T15:30:27,188 INFO [RS:0;7a780d55532c:45235 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-17T15:30:27,261 WARN [7a780d55532c:33399 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-17T15:30:27,290 INFO [RS:0;7a780d55532c:45235 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a780d55532c%2C45235%2C1731857426896, suffix=, logDir=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896, archiveDir=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/oldWALs, maxLogs=32 2024-11-17T15:30:27,290 INFO [RS:0;7a780d55532c:45235 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C45235%2C1731857426896.1731857427290 2024-11-17T15:30:27,297 INFO [RS:0;7a780d55532c:45235 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857427290 2024-11-17T15:30:27,297 DEBUG [RS:0;7a780d55532c:45235 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38115:38115),(127.0.0.1/127.0.0.1:34157:34157)] 2024-11-17T15:30:27,511 DEBUG [7a780d55532c:33399 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-17T15:30:27,512 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7a780d55532c,45235,1731857426896 2024-11-17T15:30:27,513 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7a780d55532c,45235,1731857426896, state=OPENING 2024-11-17T15:30:27,515 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-17T15:30:27,518 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33399-0x101268d141f0000, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:30:27,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45235-0x101268d141f0001, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:30:27,519 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T15:30:27,519 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T15:30:27,519 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T15:30:27,519 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7a780d55532c,45235,1731857426896}] 2024-11-17T15:30:27,672 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-17T15:30:27,674 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33311, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-17T15:30:27,678 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-17T15:30:27,678 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T15:30:27,680 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a780d55532c%2C45235%2C1731857426896.meta, suffix=.meta, logDir=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896, archiveDir=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/oldWALs, maxLogs=32 2024-11-17T15:30:27,681 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C45235%2C1731857426896.meta.1731857427680.meta 2024-11-17T15:30:27,686 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.meta.1731857427680.meta 2024-11-17T15:30:27,686 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34157:34157),(127.0.0.1/127.0.0.1:38115:38115)] 2024-11-17T15:30:27,687 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-17T15:30:27,687 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-17T15:30:27,687 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-17T15:30:27,687 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
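The WAL entries above come from the FSHLogProvider that both the region server and the meta open handler instantiate (blocksize=256 MB, rollsize=128 MB, maxLogs=32). A sketch of the configuration knobs behind those numbers; the key names are assumptions about current HBase configuration rather than values read from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalProviderConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed key: "filesystem" selects the FSHLog-based provider the log shows being instantiated.
    conf.set("hbase.wal.provider", "filesystem");
    // Assumed key: upper bound on un-archived WAL files per server (maxLogs=32 above).
    conf.setInt("hbase.regionserver.maxlogs", 32);
    // Assumed key: WAL block size; the 128 MB roll size above is derived from it via a multiplier.
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    System.out.println("wal provider = " + conf.get("hbase.wal.provider"));
  }
}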
2024-11-17T15:30:27,687 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-17T15:30:27,688 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:30:27,688 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-17T15:30:27,688 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-17T15:30:27,689 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T15:30:27,690 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T15:30:27,690 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:30:27,690 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:30:27,690 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T15:30:27,691 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T15:30:27,691 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:30:27,691 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:30:27,691 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T15:30:27,692 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T15:30:27,692 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:30:27,692 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:30:27,693 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T15:30:27,693 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T15:30:27,693 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:30:27,694 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
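The four stores opened above inherit their attributes from the hbase:meta table descriptor (ROW_INDEX_V1 encoding, ROWCOL bloom filters, in-memory, 8 KB blocks). A minimal sketch of building an equivalent column family descriptor for an ordinary table; the attribute values are copied from the log, while the family itself is just an example:

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeFamily {
  public static void main(String[] args) {
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
        .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
        .setInMemory(true)                                    // IN_MEMORY => 'true'
        .setBlocksize(8 * 1024)                               // BLOCKSIZE => '8192 B (8KB)'
        .setMaxVersions(3)                                    // VERSIONS => '3'
        .build();
    System.out.println(cf);
  }
}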
2024-11-17T15:30:27,694 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T15:30:27,694 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/data/hbase/meta/1588230740 2024-11-17T15:30:27,695 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/data/hbase/meta/1588230740 2024-11-17T15:30:27,696 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T15:30:27,697 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T15:30:27,697 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-17T15:30:27,698 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T15:30:27,699 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=743185, jitterRate=-0.0549919456243515}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T15:30:27,699 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-17T15:30:27,700 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731857427688Writing region info on filesystem at 1731857427688Initializing all the Stores at 1731857427689 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857427689Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857427689Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857427689Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857427689Cleaning up temporary data from old regions at 1731857427697 (+8 ms)Running coprocessor post-open hooks at 1731857427699 (+2 ms)Region opened successfully at 1731857427700 (+1 ms) 2024-11-17T15:30:27,701 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731857427672 2024-11-17T15:30:27,703 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-17T15:30:27,703 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-17T15:30:27,704 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7a780d55532c,45235,1731857426896 2024-11-17T15:30:27,705 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7a780d55532c,45235,1731857426896, state=OPEN 2024-11-17T15:30:27,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33399-0x101268d141f0000, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T15:30:27,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45235-0x101268d141f0001, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T15:30:27,710 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T15:30:27,710 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T15:30:27,710 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7a780d55532c,45235,1731857426896 2024-11-17T15:30:27,714 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-17T15:30:27,714 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7a780d55532c,45235,1731857426896 in 191 msec 2024-11-17T15:30:27,716 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-17T15:30:27,716 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 606 msec 2024-11-17T15:30:27,717 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T15:30:27,717 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-17T15:30:27,718 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T15:30:27,718 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7a780d55532c,45235,1731857426896, seqNum=-1] 2024-11-17T15:30:27,719 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T15:30:27,720 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36889, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T15:30:27,725 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 661 msec 2024-11-17T15:30:27,725 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731857427725, completionTime=-1 2024-11-17T15:30:27,725 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-17T15:30:27,725 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-17T15:30:27,726 INFO [master/7a780d55532c:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-17T15:30:27,727 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731857487726 2024-11-17T15:30:27,727 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731857547727 2024-11-17T15:30:27,727 INFO [master/7a780d55532c:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-17T15:30:27,727 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,33399,1731857426845-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T15:30:27,727 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,33399,1731857426845-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:30:27,727 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,33399,1731857426845-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:30:27,727 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7a780d55532c:33399, period=300000, unit=MILLISECONDS is enabled. 
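With InitMetaProcedure finished, hbase:meta is assigned and the 'default' and 'hbase' namespaces it announces above should exist. A minimal sketch of verifying that from a client; the quorum value is the one shown earlier in the log, everything else is standard hbase-client API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class NamespaceCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1:56215"); // quorum shown earlier in the log
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Expect the two namespaces InitMetaProcedure says it is going to create.
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println("namespace: " + ns.getName());
      }
    }
  }
}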
2024-11-17T15:30:27,727 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-17T15:30:27,727 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-17T15:30:27,729 DEBUG [master/7a780d55532c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-17T15:30:27,730 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.795sec 2024-11-17T15:30:27,730 INFO [master/7a780d55532c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-17T15:30:27,731 INFO [master/7a780d55532c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-17T15:30:27,731 INFO [master/7a780d55532c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-17T15:30:27,731 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-17T15:30:27,731 INFO [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-17T15:30:27,731 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,33399,1731857426845-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T15:30:27,731 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,33399,1731857426845-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-17T15:30:27,731 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:27,733 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-17T15:30:27,733 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-17T15:30:27,733 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,33399,1731857426845-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:30:27,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:27,817 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64ad885f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T15:30:27,817 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7a780d55532c,33399,-1 for getting cluster id 2024-11-17T15:30:27,817 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-17T15:30:27,819 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e8fe7ad3-639a-468f-a88d-ea7629c61bbf' 2024-11-17T15:30:27,819 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-17T15:30:27,820 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e8fe7ad3-639a-468f-a88d-ea7629c61bbf" 2024-11-17T15:30:27,820 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@127225e1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T15:30:27,820 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7a780d55532c,33399,-1] 2024-11-17T15:30:27,820 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-17T15:30:27,820 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:30:27,822 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33836, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-17T15:30:27,823 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@55f4ab1a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T15:30:27,823 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T15:30:27,824 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7a780d55532c,45235,1731857426896, seqNum=-1] 2024-11-17T15:30:27,825 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T15:30:27,826 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56460, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T15:30:27,828 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7a780d55532c,33399,1731857426845 2024-11-17T15:30:27,828 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:30:27,831 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-17T15:30:27,831 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-17T15:30:27,831 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-17T15:30:27,831 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-17T15:30:27,832 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 7a780d55532c,33399,1731857426845 2024-11-17T15:30:27,832 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@502bcdb0 2024-11-17T15:30:27,832 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-17T15:30:27,834 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33848, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-17T15:30:27,835 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33399 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-17T15:30:27,835 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33399 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-17T15:30:27,835 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33399 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T15:30:27,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33399 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-17T15:30:27,838 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-17T15:30:27,838 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:30:27,838 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33399 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-17T15:30:27,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33399 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-17T15:30:27,840 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-17T15:30:27,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32795 is added to blk_1073741835_1011 (size=395) 2024-11-17T15:30:27,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46793 is added to blk_1073741835_1011 (size=395) 2024-11-17T15:30:27,852 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 69a24af8f4caa18cd5e83f4aea481c33, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731857427835.69a24af8f4caa18cd5e83f4aea481c33.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc 2024-11-17T15:30:27,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46793 is added to blk_1073741836_1012 (size=78) 2024-11-17T15:30:27,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32795 is added to blk_1073741836_1012 (size=78) 2024-11-17T15:30:27,859 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731857427835.69a24af8f4caa18cd5e83f4aea481c33.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:30:27,859 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 69a24af8f4caa18cd5e83f4aea481c33, disabling compactions & flushes 2024-11-17T15:30:27,859 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731857427835.69a24af8f4caa18cd5e83f4aea481c33. 2024-11-17T15:30:27,859 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731857427835.69a24af8f4caa18cd5e83f4aea481c33. 2024-11-17T15:30:27,859 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731857427835.69a24af8f4caa18cd5e83f4aea481c33. after waiting 0 ms 2024-11-17T15:30:27,859 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731857427835.69a24af8f4caa18cd5e83f4aea481c33. 2024-11-17T15:30:27,859 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731857427835.69a24af8f4caa18cd5e83f4aea481c33. 2024-11-17T15:30:27,859 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 69a24af8f4caa18cd5e83f4aea481c33: Waiting for close lock at 1731857427859Disabling compacts and flushes for region at 1731857427859Disabling writes for close at 1731857427859Writing region close event to WAL at 1731857427859Closed at 1731857427859 2024-11-17T15:30:27,861 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-17T15:30:27,861 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731857427835.69a24af8f4caa18cd5e83f4aea481c33.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731857427861"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731857427861"}]},"ts":"1731857427861"} 2024-11-17T15:30:27,864 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-17T15:30:27,865 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-17T15:30:27,865 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731857427865"}]},"ts":"1731857427865"} 2024-11-17T15:30:27,867 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-17T15:30:27,868 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=69a24af8f4caa18cd5e83f4aea481c33, ASSIGN}] 2024-11-17T15:30:27,869 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=69a24af8f4caa18cd5e83f4aea481c33, ASSIGN 2024-11-17T15:30:27,870 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=69a24af8f4caa18cd5e83f4aea481c33, ASSIGN; state=OFFLINE, location=7a780d55532c,45235,1731857426896; forceNewPlan=false, retain=false 2024-11-17T15:30:28,020 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=69a24af8f4caa18cd5e83f4aea481c33, regionState=OPENING, regionLocation=7a780d55532c,45235,1731857426896 2024-11-17T15:30:28,023 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=69a24af8f4caa18cd5e83f4aea481c33, ASSIGN because future has completed 2024-11-17T15:30:28,024 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 69a24af8f4caa18cd5e83f4aea481c33, server=7a780d55532c,45235,1731857426896}] 2024-11-17T15:30:28,180 INFO [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731857427835.69a24af8f4caa18cd5e83f4aea481c33. 
2024-11-17T15:30:28,180 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 69a24af8f4caa18cd5e83f4aea481c33, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731857427835.69a24af8f4caa18cd5e83f4aea481c33.', STARTKEY => '', ENDKEY => ''} 2024-11-17T15:30:28,181 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 69a24af8f4caa18cd5e83f4aea481c33 2024-11-17T15:30:28,181 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731857427835.69a24af8f4caa18cd5e83f4aea481c33.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:30:28,181 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 69a24af8f4caa18cd5e83f4aea481c33 2024-11-17T15:30:28,181 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 69a24af8f4caa18cd5e83f4aea481c33 2024-11-17T15:30:28,182 INFO [StoreOpener-69a24af8f4caa18cd5e83f4aea481c33-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 69a24af8f4caa18cd5e83f4aea481c33 2024-11-17T15:30:28,184 INFO [StoreOpener-69a24af8f4caa18cd5e83f4aea481c33-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 69a24af8f4caa18cd5e83f4aea481c33 columnFamilyName info 2024-11-17T15:30:28,184 DEBUG [StoreOpener-69a24af8f4caa18cd5e83f4aea481c33-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:30:28,184 INFO [StoreOpener-69a24af8f4caa18cd5e83f4aea481c33-1 {}] regionserver.HStore(327): Store=69a24af8f4caa18cd5e83f4aea481c33/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T15:30:28,184 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 69a24af8f4caa18cd5e83f4aea481c33 2024-11-17T15:30:28,185 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/data/default/TestLogRolling-testLogRollOnPipelineRestart/69a24af8f4caa18cd5e83f4aea481c33 2024-11-17T15:30:28,185 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/data/default/TestLogRolling-testLogRollOnPipelineRestart/69a24af8f4caa18cd5e83f4aea481c33 2024-11-17T15:30:28,185 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 69a24af8f4caa18cd5e83f4aea481c33 2024-11-17T15:30:28,186 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 69a24af8f4caa18cd5e83f4aea481c33 2024-11-17T15:30:28,187 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 69a24af8f4caa18cd5e83f4aea481c33 2024-11-17T15:30:28,189 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/data/default/TestLogRolling-testLogRollOnPipelineRestart/69a24af8f4caa18cd5e83f4aea481c33/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T15:30:28,189 INFO [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 69a24af8f4caa18cd5e83f4aea481c33; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=863001, jitterRate=0.09736323356628418}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-17T15:30:28,189 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 69a24af8f4caa18cd5e83f4aea481c33 2024-11-17T15:30:28,190 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 69a24af8f4caa18cd5e83f4aea481c33: Running coprocessor pre-open hook at 1731857428181Writing region info on filesystem at 1731857428181Initializing all the Stores at 1731857428182 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857428182Cleaning up temporary data from old regions at 1731857428186 (+4 ms)Running coprocessor post-open hooks at 1731857428189 (+3 ms)Region opened successfully at 1731857428190 (+1 ms) 2024-11-17T15:30:28,191 INFO [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731857427835.69a24af8f4caa18cd5e83f4aea481c33., pid=6, masterSystemTime=1731857428176 2024-11-17T15:30:28,193 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1731857427835.69a24af8f4caa18cd5e83f4aea481c33. 2024-11-17T15:30:28,193 INFO [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731857427835.69a24af8f4caa18cd5e83f4aea481c33. 2024-11-17T15:30:28,194 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=69a24af8f4caa18cd5e83f4aea481c33, regionState=OPEN, openSeqNum=2, regionLocation=7a780d55532c,45235,1731857426896 2024-11-17T15:30:28,197 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 69a24af8f4caa18cd5e83f4aea481c33, server=7a780d55532c,45235,1731857426896 because future has completed 2024-11-17T15:30:28,202 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-17T15:30:28,202 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 69a24af8f4caa18cd5e83f4aea481c33, server=7a780d55532c,45235,1731857426896 in 175 msec 2024-11-17T15:30:28,204 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-17T15:30:28,204 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=69a24af8f4caa18cd5e83f4aea481c33, ASSIGN in 335 msec 2024-11-17T15:30:28,206 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-17T15:30:28,206 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731857428206"}]},"ts":"1731857428206"} 2024-11-17T15:30:28,208 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-17T15:30:28,209 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-17T15:30:28,211 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 374 msec 2024-11-17T15:30:28,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:28,736 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:29,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:29,736 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:30,733 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:30,737 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:30:31,379 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-17T15:30:31,379 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-17T15:30:31,380 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-17T15:30:31,380 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-17T15:30:31,380 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T15:30:31,381 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-17T15:30:31,733 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:31,737 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:32,734 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:32,738 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:33,190 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-17T15:30:33,204 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:33,204 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:33,205 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:33,205 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:33,205 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:33,205 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:33,208 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:33,208 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:33,209 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:33,211 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:33,215 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-17T15:30:33,216 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-17T15:30:33,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:33,738 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:34,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:34,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:35,736 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:35,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:36,736 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:36,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:37,737 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:37,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:37,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33399 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-17T15:30:37,936 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-17T15:30:37,936 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-17T15:30:37,938 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-17T15:30:37,939 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731857427835.69a24af8f4caa18cd5e83f4aea481c33. 2024-11-17T15:30:37,942 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731857427835.69a24af8f4caa18cd5e83f4aea481c33., hostname=7a780d55532c,45235,1731857426896, seqNum=2] 2024-11-17T15:30:38,737 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:38,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:39,738 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:39,741 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:39,945 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857427290 2024-11-17T15:30:39,945 WARN [ResponseProcessor for block BP-1061960288-172.17.0.2-1731857426049:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1061960288-172.17.0.2-1731857426049:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:39,945 WARN [ResponseProcessor for block BP-1061960288-172.17.0.2-1731857426049:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1061960288-172.17.0.2-1731857426049:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-1061960288-172.17.0.2-1731857426049:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:32795,DS-7e13c295-e1da-44cc-b769-460087a89f8d,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T15:30:39,945 WARN [ResponseProcessor for block BP-1061960288-172.17.0.2-1731857426049:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1061960288-172.17.0.2-1731857426049:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1061960288-172.17.0.2-1731857426049:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:32795,DS-7e13c295-e1da-44cc-b769-460087a89f8d,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:39,946 WARN [DataStreamer for file /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.meta.1731857427680.meta block BP-1061960288-172.17.0.2-1731857426049:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1061960288-172.17.0.2-1731857426049:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32795,DS-7e13c295-e1da-44cc-b769-460087a89f8d,DISK], DatanodeInfoWithStorage[127.0.0.1:46793,DS-a395277a-0be1-49dc-816c-4f82b4a2dbcc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32795,DS-7e13c295-e1da-44cc-b769-460087a89f8d,DISK]) is bad. 2024-11-17T15:30:39,946 WARN [DataStreamer for file /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857427290 block BP-1061960288-172.17.0.2-1731857426049:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1061960288-172.17.0.2-1731857426049:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46793,DS-a395277a-0be1-49dc-816c-4f82b4a2dbcc,DISK], DatanodeInfoWithStorage[127.0.0.1:32795,DS-7e13c295-e1da-44cc-b769-460087a89f8d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32795,DS-7e13c295-e1da-44cc-b769-460087a89f8d,DISK]) is bad. 2024-11-17T15:30:39,946 WARN [DataStreamer for file /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/WALs/7a780d55532c,33399,1731857426845/7a780d55532c%2C33399%2C1731857426845.1731857426989 block BP-1061960288-172.17.0.2-1731857426049:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1061960288-172.17.0.2-1731857426049:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46793,DS-a395277a-0be1-49dc-816c-4f82b4a2dbcc,DISK], DatanodeInfoWithStorage[127.0.0.1:32795,DS-7e13c295-e1da-44cc-b769-460087a89f8d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32795,DS-7e13c295-e1da-44cc-b769-460087a89f8d,DISK]) is bad. 2024-11-17T15:30:39,946 WARN [PacketResponder: BP-1061960288-172.17.0.2-1731857426049:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:32795] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:39,946 WARN [PacketResponder: BP-1061960288-172.17.0.2-1731857426049:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:32795] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:39,946 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1226328680_22 at /127.0.0.1:60686 [Receiving block BP-1061960288-172.17.0.2-1731857426049:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:32795:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60686 dst: /127.0.0.1:32795 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:39,947 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1226328680_22 at /127.0.0.1:39014 [Receiving block BP-1061960288-172.17.0.2-1731857426049:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46793:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39014 dst: /127.0.0.1:46793 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T15:30:39,947 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1226328680_22 at /127.0.0.1:39000 [Receiving block BP-1061960288-172.17.0.2-1731857426049:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:46793:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39000 dst: /127.0.0.1:46793 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:39,947 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_660180343_22 at /127.0.0.1:38970 [Receiving block BP-1061960288-172.17.0.2-1731857426049:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46793:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38970 dst: /127.0.0.1:46793 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T15:30:39,947 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_660180343_22 at /127.0.0.1:60658 [Receiving block BP-1061960288-172.17.0.2-1731857426049:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:32795:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60658 dst: /127.0.0.1:32795 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:39,947 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1226328680_22 at /127.0.0.1:60682 [Receiving block BP-1061960288-172.17.0.2-1731857426049:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:32795:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60682 dst: /127.0.0.1:32795 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:39,949 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@aae1781{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:30:39,949 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4ef0fe27{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T15:30:39,950 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T15:30:39,950 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@794cb94f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T15:30:39,950 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21998c84{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/hadoop.log.dir/,STOPPED} 2024-11-17T15:30:39,951 WARN [BP-1061960288-172.17.0.2-1731857426049 heartbeating to localhost/127.0.0.1:43551 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T15:30:39,951 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T15:30:39,951 WARN [BP-1061960288-172.17.0.2-1731857426049 heartbeating to localhost/127.0.0.1:43551 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1061960288-172.17.0.2-1731857426049 (Datanode Uuid 12f6a0f3-ac45-417e-acc2-7b19ff10e633) service to localhost/127.0.0.1:43551 2024-11-17T15:30:39,951 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T15:30:39,952 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/cluster_51169b72-0b57-551e-5b79-ff6d39288205/data/data3/current/BP-1061960288-172.17.0.2-1731857426049 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:30:39,952 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/cluster_51169b72-0b57-551e-5b79-ff6d39288205/data/data4/current/BP-1061960288-172.17.0.2-1731857426049 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:30:39,952 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T15:30:39,962 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:30:39,965 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T15:30:39,966 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T15:30:39,966 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T15:30:39,966 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T15:30:39,966 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@b4c4f18{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/hadoop.log.dir/,AVAILABLE} 2024-11-17T15:30:39,966 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e37a2ab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T15:30:40,080 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7f49120b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/java.io.tmpdir/jetty-localhost-35637-hadoop-hdfs-3_4_1-tests_jar-_-any-14386823519234149138/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:30:40,080 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4f9f08a5{HTTP/1.1, 
(http/1.1)}{localhost:35637} 2024-11-17T15:30:40,080 INFO [Time-limited test {}] server.Server(415): Started @164002ms 2024-11-17T15:30:40,081 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T15:30:40,101 WARN [ResponseProcessor for block BP-1061960288-172.17.0.2-1731857426049:blk_1073741834_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1061960288-172.17.0.2-1731857426049:blk_1073741834_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:40,101 WARN [ResponseProcessor for block BP-1061960288-172.17.0.2-1731857426049:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1061960288-172.17.0.2-1731857426049:blk_1073741830_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:40,101 WARN [ResponseProcessor for block BP-1061960288-172.17.0.2-1731857426049:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1061960288-172.17.0.2-1731857426049:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:40,101 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_660180343_22 at /127.0.0.1:34252 [Receiving block BP-1061960288-172.17.0.2-1731857426049:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46793:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34252 dst: /127.0.0.1:46793 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:40,101 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1226328680_22 at /127.0.0.1:34236 [Receiving block BP-1061960288-172.17.0.2-1731857426049:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46793:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34236 dst: /127.0.0.1:46793 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:40,102 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1226328680_22 at /127.0.0.1:34250 [Receiving block BP-1061960288-172.17.0.2-1731857426049:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:46793:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34250 dst: /127.0.0.1:46793 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
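The "Unable to initialize FileSignerSecretProvider, falling back to use random secrets" warning above fires because the embedded Jetty servers cannot read /home/jenkins/hadoop-http-auth-signature-secret. A minimal sketch of how a test-owned Configuration could supply that file is shown below; the property name is the documented hadoop.http.authentication.signature.secret.file, while the helper class, file contents, and temp-file location are illustrative and not taken from this test run.

    import java.nio.file.Files;
    import java.nio.file.Path;
    import org.apache.hadoop.conf.Configuration;

    public final class SignatureSecretSetup {
      /** Point the HTTP authentication filter at a readable signature secret file. */
      public static Configuration withSignatureSecret(Configuration conf) throws Exception {
        Path secret = Files.createTempFile("hadoop-http-auth-signature-secret", null);
        Files.writeString(secret, "test-only-secret");  // illustrative value, not a real secret
        conf.set("hadoop.http.authentication.signature.secret.file", secret.toString());
        return conf;
      }
    }

With a readable secret file in place, AuthenticationFilter should not need to fall back to random secrets each time one of these DataNode web apps starts.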
2024-11-17T15:30:40,103 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@131e974b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:30:40,103 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5aff23e7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T15:30:40,103 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T15:30:40,103 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7f6bebc0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T15:30:40,103 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3b3376d9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/hadoop.log.dir/,STOPPED} 2024-11-17T15:30:40,104 WARN [BP-1061960288-172.17.0.2-1731857426049 heartbeating to localhost/127.0.0.1:43551 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T15:30:40,104 WARN [BP-1061960288-172.17.0.2-1731857426049 heartbeating to localhost/127.0.0.1:43551 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1061960288-172.17.0.2-1731857426049 (Datanode Uuid 447dae23-b803-487e-bf73-7d03e699e225) service to localhost/127.0.0.1:43551 2024-11-17T15:30:40,105 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/cluster_51169b72-0b57-551e-5b79-ff6d39288205/data/data1/current/BP-1061960288-172.17.0.2-1731857426049 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:30:40,105 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/cluster_51169b72-0b57-551e-5b79-ff6d39288205/data/data2/current/BP-1061960288-172.17.0.2-1731857426049 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:30:40,105 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-17T15:30:40,105 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T15:30:40,105 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T15:30:40,113 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:30:40,116 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T15:30:40,116 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T15:30:40,116 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T15:30:40,116 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T15:30:40,117 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54cda05a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/hadoop.log.dir/,AVAILABLE} 2024-11-17T15:30:40,117 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3393bacb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T15:30:40,171 WARN [Thread-1350 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-17T15:30:40,174 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdcf3051f3ec91c76 with lease ID 0x7016e82585fa58f2: from storage DS-7e13c295-e1da-44cc-b769-460087a89f8d node DatanodeRegistration(127.0.0.1:43439, datanodeUuid=12f6a0f3-ac45-417e-acc2-7b19ff10e633, infoPort=36123, infoSecurePort=0, ipcPort=37343, storageInfo=lv=-57;cid=testClusterID;nsid=1457268781;c=1731857426049), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-17T15:30:40,174 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdcf3051f3ec91c76 with lease ID 0x7016e82585fa58f2: from storage DS-eaed7fcb-5dba-4f52-a703-df65103416d1 node DatanodeRegistration(127.0.0.1:43439, datanodeUuid=12f6a0f3-ac45-417e-acc2-7b19ff10e633, infoPort=36123, infoSecurePort=0, ipcPort=37343, storageInfo=lv=-57;cid=testClusterID;nsid=1457268781;c=1731857426049), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:30:40,271 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5f19ade1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/java.io.tmpdir/jetty-localhost-41491-hadoop-hdfs-3_4_1-tests_jar-_-any-8484013910333806369/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:30:40,271 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5fb420d9{HTTP/1.1, (http/1.1)}{localhost:41491} 2024-11-17T15:30:40,271 INFO [Time-limited test {}] server.Server(415): Started @164193ms 2024-11-17T15:30:40,272 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
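The DirectoryScanner warning above means the configured throttle exceeds the 1000 ms-per-second ceiling, so the DataNode silently falls back to -1 (throttling disabled). A hedged sketch of a conforming setting follows, assuming the cluster Configuration is built in test code; the value 1000 is only an example of a legal maximum.

    import org.apache.hadoop.conf.Configuration;

    public final class DirectoryScannerThrottle {
      public static Configuration capThrottle(Configuration conf) {
        // Per the warning above, values over 1000 ms/sec are rejected and the scanner
        // reverts to the default of -1, which disables throttling entirely.
        conf.setInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", 1000);
        return conf;
      }
    }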
2024-11-17T15:30:40,356 WARN [Thread-1381 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-17T15:30:40,359 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8e3f93bfb8ce735 with lease ID 0x7016e82585fa58f3: from storage DS-a395277a-0be1-49dc-816c-4f82b4a2dbcc node DatanodeRegistration(127.0.0.1:45309, datanodeUuid=447dae23-b803-487e-bf73-7d03e699e225, infoPort=35603, infoSecurePort=0, ipcPort=43027, storageInfo=lv=-57;cid=testClusterID;nsid=1457268781;c=1731857426049), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-17T15:30:40,359 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8e3f93bfb8ce735 with lease ID 0x7016e82585fa58f3: from storage DS-b7dc572d-11c4-4046-8f20-dc70f33bf181 node DatanodeRegistration(127.0.0.1:45309, datanodeUuid=447dae23-b803-487e-bf73-7d03e699e225, infoPort=35603, infoSecurePort=0, ipcPort=43027, storageInfo=lv=-57;cid=testClusterID;nsid=1457268781;c=1731857426049), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:30:40,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:30:40,741 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:41,290 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-17T15:30:41,292 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-17T15:30:41,294 ERROR [FSHLog-0-hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc-prefix:7a780d55532c,45235,1731857426896 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46793,DS-a395277a-0be1-49dc-816c-4f82b4a2dbcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
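The "All datanodes [...] are bad. Aborting..." failure above is the DFS output stream giving up on pipeline recovery: with only two DataNodes in this mini cluster, one of which has just been bounced, there is no replacement node to swap into the write pipeline. A common mitigation in small test clusters is to relax the client's replace-datanode-on-failure policy; the sketch below uses standard HDFS client property names, but the chosen values are illustrative and are not what this test itself configures.

    import org.apache.hadoop.conf.Configuration;

    public final class SmallClusterWriteConf {
      public static Configuration relaxPipelineRecovery(Configuration conf) {
        // Do not demand a replacement DataNode when one member of a two-node pipeline dies.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
        // (Another option is to keep the DEFAULT policy and set
        //  dfs.client.block.write.replace-datanode-on-failure.best-effort to true.)
        return conf;
      }
    }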
2024-11-17T15:30:41,294 WARN [FSHLog-0-hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc-prefix:7a780d55532c,45235,1731857426896 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46793,DS-a395277a-0be1-49dc-816c-4f82b4a2dbcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:41,294 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7a780d55532c%2C45235%2C1731857426896:(num 1731857427290) roll requested 2024-11-17T15:30:41,294 INFO [regionserver/7a780d55532c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C45235%2C1731857426896.1731857441294 2024-11-17T15:30:41,300 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857427290 newFile=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857441294 2024-11-17T15:30:41,300 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:41,300 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:41,300 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:41,300 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:41,300 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:41,301 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857427290 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857441294 2024-11-17T15:30:41,301 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46793,DS-a395277a-0be1-49dc-816c-4f82b4a2dbcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
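The "roll requested" / "Rolled WAL ... with entries=2" sequence above is the region server's log roller reacting to the failed append. For reference, the same roll can also be requested from client code; the hedged sketch below uses the public Admin API, with the ServerName pieces taken from the server name in the log and the connection wiring purely illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class RollWalSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Server name pieces as they appear in the log: host, port, start code.
          ServerName rs = ServerName.valueOf("7a780d55532c", 45235, 1731857426896L);
          admin.rollWALWriter(rs);  // asks that region server to roll its WAL writer
        }
      }
    }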
2024-11-17T15:30:41,301 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46793,DS-a395277a-0be1-49dc-816c-4f82b4a2dbcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:41,301 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857427290 2024-11-17T15:30:41,301 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36123:36123),(127.0.0.1/127.0.0.1:35603:35603)] 2024-11-17T15:30:41,301 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857427290 is not closed yet, will try archiving it next time 2024-11-17T15:30:41,301 WARN [IPC Server handler 1 on default port 43551 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857427290 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-17T15:30:41,302 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857427290 after 1ms 2024-11-17T15:30:41,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:41,742 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:30:42,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:42,742 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:43,305 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-17T15:30:43,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:30:43,743 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:44,741 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:44,743 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:30:45,174 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-17T15:30:45,303 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857427290 after 4001ms 2024-11-17T15:30:45,308 WARN [ResponseProcessor for block BP-1061960288-172.17.0.2-1731857426049:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1061960288-172.17.0.2-1731857426049:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-1061960288-172.17.0.2-1731857426049:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:45309,DS-a395277a-0be1-49dc-816c-4f82b4a2dbcc,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:45,308 WARN [DataStreamer for file /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857441294 block BP-1061960288-172.17.0.2-1731857426049:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1061960288-172.17.0.2-1731857426049:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43439,DS-7e13c295-e1da-44cc-b769-460087a89f8d,DISK], DatanodeInfoWithStorage[127.0.0.1:45309,DS-a395277a-0be1-49dc-816c-4f82b4a2dbcc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45309,DS-a395277a-0be1-49dc-816c-4f82b4a2dbcc,DISK]) is bad. 2024-11-17T15:30:45,308 WARN [PacketResponder: BP-1061960288-172.17.0.2-1731857426049:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:45309] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] 
at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:45,309 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1226328680_22 at /127.0.0.1:54870 [Receiving block BP-1061960288-172.17.0.2-1731857426049:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:43439:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54870 dst: /127.0.0.1:43439 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:45,309 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1226328680_22 at /127.0.0.1:37882 [Receiving block BP-1061960288-172.17.0.2-1731857426049:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:45309:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37882 dst: /127.0.0.1:45309 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:45,310 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5f19ade1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:30:45,311 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5fb420d9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T15:30:45,311 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T15:30:45,311 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3393bacb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T15:30:45,311 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54cda05a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/hadoop.log.dir/,STOPPED} 2024-11-17T15:30:45,312 WARN [BP-1061960288-172.17.0.2-1731857426049 heartbeating to localhost/127.0.0.1:43551 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T15:30:45,312 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
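The RecoverLeaseFSUtils entries above ("Recover lease on dfs file ...", "Failed to recover lease, attempt=0 ... after 1ms", "Recovered lease, attempt=1 ... after 4001ms") are a recover-then-poll loop over the old WAL file. A simplified sketch of that pattern is below, assuming a plain DistributedFileSystem handle; both calls appear in the stack traces above, but the retry interval and loop structure here are illustrative rather than the utility's actual back-off.

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoverySketch {
      public static void recover(DistributedFileSystem dfs, Path oldWal) throws Exception {
        // First attempt: ask the NameNode to start lease recovery on the un-closed file.
        boolean recovered = dfs.recoverLease(oldWal);
        while (!recovered) {
          Thread.sleep(1000L);  // illustrative poll interval
          // isFileClosed is the same probe the "Filesystem closed" stack traces above
          // show failing once the owning DFSClient has been shut down.
          recovered = dfs.isFileClosed(oldWal) || dfs.recoverLease(oldWal);
        }
      }
    }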
2024-11-17T15:30:45,312 WARN [BP-1061960288-172.17.0.2-1731857426049 heartbeating to localhost/127.0.0.1:43551 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1061960288-172.17.0.2-1731857426049 (Datanode Uuid 447dae23-b803-487e-bf73-7d03e699e225) service to localhost/127.0.0.1:43551 2024-11-17T15:30:45,312 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T15:30:45,313 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/cluster_51169b72-0b57-551e-5b79-ff6d39288205/data/data1/current/BP-1061960288-172.17.0.2-1731857426049 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:30:45,313 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/cluster_51169b72-0b57-551e-5b79-ff6d39288205/data/data2/current/BP-1061960288-172.17.0.2-1731857426049 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:30:45,313 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T15:30:45,322 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:30:45,325 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T15:30:45,326 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T15:30:45,326 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T15:30:45,326 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T15:30:45,326 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@27361061{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/hadoop.log.dir/,AVAILABLE} 2024-11-17T15:30:45,326 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@22699b58{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T15:30:45,442 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4bda5c57{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/java.io.tmpdir/jetty-localhost-42363-hadoop-hdfs-3_4_1-tests_jar-_-any-6660718852198042293/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:30:45,442 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@40b8cc2f{HTTP/1.1, 
(http/1.1)}{localhost:42363} 2024-11-17T15:30:45,442 INFO [Time-limited test {}] server.Server(415): Started @169364ms 2024-11-17T15:30:45,444 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T15:30:45,467 WARN [ResponseProcessor for block BP-1061960288-172.17.0.2-1731857426049:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1061960288-172.17.0.2-1731857426049:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:45,468 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1226328680_22 at /127.0.0.1:54880 [Receiving block BP-1061960288-172.17.0.2-1731857426049:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:43439:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54880 dst: /127.0.0.1:43439 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T15:30:45,469 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7f49120b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:30:45,469 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4f9f08a5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T15:30:45,469 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T15:30:45,470 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e37a2ab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T15:30:45,470 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@b4c4f18{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/hadoop.log.dir/,STOPPED} 2024-11-17T15:30:45,471 WARN [BP-1061960288-172.17.0.2-1731857426049 heartbeating to localhost/127.0.0.1:43551 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T15:30:45,471 WARN [BP-1061960288-172.17.0.2-1731857426049 heartbeating to localhost/127.0.0.1:43551 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1061960288-172.17.0.2-1731857426049 (Datanode Uuid 12f6a0f3-ac45-417e-acc2-7b19ff10e633) service to localhost/127.0.0.1:43551 2024-11-17T15:30:45,471 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-17T15:30:45,471 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T15:30:45,472 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/cluster_51169b72-0b57-551e-5b79-ff6d39288205/data/data3/current/BP-1061960288-172.17.0.2-1731857426049 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:30:45,472 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/cluster_51169b72-0b57-551e-5b79-ff6d39288205/data/data4/current/BP-1061960288-172.17.0.2-1731857426049 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:30:45,472 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T15:30:45,482 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:30:45,484 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T15:30:45,485 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T15:30:45,485 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T15:30:45,485 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T15:30:45,485 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1146b324{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/hadoop.log.dir/,AVAILABLE} 2024-11-17T15:30:45,486 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@511ae001{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T15:30:45,554 WARN [Thread-1424 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-17T15:30:45,557 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x93afe6e4178e9511 with lease ID 0x7016e82585fa58f4: from storage DS-a395277a-0be1-49dc-816c-4f82b4a2dbcc node DatanodeRegistration(127.0.0.1:41335, datanodeUuid=447dae23-b803-487e-bf73-7d03e699e225, infoPort=41265, infoSecurePort=0, ipcPort=36595, storageInfo=lv=-57;cid=testClusterID;nsid=1457268781;c=1731857426049), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-17T15:30:45,557 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x93afe6e4178e9511 with lease ID 0x7016e82585fa58f4: from storage DS-b7dc572d-11c4-4046-8f20-dc70f33bf181 node DatanodeRegistration(127.0.0.1:41335, datanodeUuid=447dae23-b803-487e-bf73-7d03e699e225, infoPort=41265, infoSecurePort=0, ipcPort=36595, storageInfo=lv=-57;cid=testClusterID;nsid=1457268781;c=1731857426049), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:30:45,600 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@27703b15{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/java.io.tmpdir/jetty-localhost-41667-hadoop-hdfs-3_4_1-tests_jar-_-any-4170700240480531681/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:30:45,601 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@51f33716{HTTP/1.1, (http/1.1)}{localhost:41667} 2024-11-17T15:30:45,601 INFO [Time-limited test {}] server.Server(415): Started @169522ms 2024-11-17T15:30:45,602 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-17T15:30:45,688 WARN [Thread-1455 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-17T15:30:45,691 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaf39c36b79695b26 with lease ID 0x7016e82585fa58f5: from storage DS-7e13c295-e1da-44cc-b769-460087a89f8d node DatanodeRegistration(127.0.0.1:40917, datanodeUuid=12f6a0f3-ac45-417e-acc2-7b19ff10e633, infoPort=46741, infoSecurePort=0, ipcPort=38537, storageInfo=lv=-57;cid=testClusterID;nsid=1457268781;c=1731857426049), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:30:45,692 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaf39c36b79695b26 with lease ID 0x7016e82585fa58f5: from storage DS-eaed7fcb-5dba-4f52-a703-df65103416d1 node DatanodeRegistration(127.0.0.1:40917, datanodeUuid=12f6a0f3-ac45-417e-acc2-7b19ff10e633, infoPort=46741, infoSecurePort=0, ipcPort=38537, storageInfo=lv=-57;cid=testClusterID;nsid=1457268781;c=1731857426049), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:30:45,742 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:30:45,744 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:46,619 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-17T15:30:46,621 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-17T15:30:46,622 ERROR [FSHLog-0-hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc-prefix:7a780d55532c,45235,1731857426896 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43439,DS-7e13c295-e1da-44cc-b769-460087a89f8d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T15:30:46,623 WARN [FSHLog-0-hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc-prefix:7a780d55532c,45235,1731857426896 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43439,DS-7e13c295-e1da-44cc-b769-460087a89f8d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:46,623 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7a780d55532c%2C45235%2C1731857426896:(num 1731857441294) roll requested 2024-11-17T15:30:46,623 INFO [regionserver/7a780d55532c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C45235%2C1731857426896.1731857446623 2024-11-17T15:30:46,629 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857441294 newFile=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857446623 2024-11-17T15:30:46,629 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:46,629 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:46,629 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:46,629 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:46,629 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:46,629 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857441294 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857446623 2024-11-17T15:30:46,630 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43439,DS-7e13c295-e1da-44cc-b769-460087a89f8d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T15:30:46,630 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43439,DS-7e13c295-e1da-44cc-b769-460087a89f8d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:46,630 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857441294 2024-11-17T15:30:46,630 WARN [IPC Server handler 0 on default port 43551 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857441294 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-17T15:30:46,630 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857441294 after 0ms 2024-11-17T15:30:46,632 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41265:41265),(127.0.0.1/127.0.0.1:46741:46741)] 2024-11-17T15:30:46,632 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857441294 is not closed yet, will try archiving it next time 2024-11-17T15:30:46,742 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:46,744 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:30:47,556 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-17T15:30:47,743 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:47,744 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:30:48,634 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C45235%2C1731857426896.1731857448634 2024-11-17T15:30:48,639 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857446623 newFile=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857448634 2024-11-17T15:30:48,640 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:48,640 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:48,640 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:48,640 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:48,640 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:48,640 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857446623 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857448634 2024-11-17T15:30:48,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40917 is added to blk_1073741838_1019 (size=1264) 2024-11-17T15:30:48,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41335 is added to blk_1073741838_1019 (size=1264) 2024-11-17T15:30:48,643 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857441294 is not closed yet, will try archiving it next time 2024-11-17T15:30:48,644 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46741:46741),(127.0.0.1/127.0.0.1:41265:41265)] 2024-11-17T15:30:48,644 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857441294 is not closed yet, will try archiving it next time 2024-11-17T15:30:48,645 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857427290 2024-11-17T15:30:48,645 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857427290 2024-11-17T15:30:48,645 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857427290 after 0ms 2024-11-17T15:30:48,645 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL 
/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857427290 2024-11-17T15:30:48,654 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731857428190/Put/vlen=218/seqid=0] 2024-11-17T15:30:48,654 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731857437943/Put/vlen=1045/seqid=0] 2024-11-17T15:30:48,654 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857427290 2024-11-17T15:30:48,654 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857441294 2024-11-17T15:30:48,654 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857441294 2024-11-17T15:30:48,654 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857441294 after 0ms 2024-11-17T15:30:48,654 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857441294 2024-11-17T15:30:48,657 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731857441293/Put/vlen=1045/seqid=0] 2024-11-17T15:30:48,658 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731857443306/Put/vlen=1045/seqid=0] 2024-11-17T15:30:48,658 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857441294 2024-11-17T15:30:48,658 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857446623 2024-11-17T15:30:48,658 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857446623 2024-11-17T15:30:48,658 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857446623 after 0ms 2024-11-17T15:30:48,658 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857446623 2024-11-17T15:30:48,661 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731857446622/Put/vlen=1045/seqid=0] 2024-11-17T15:30:48,661 
DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857448634 2024-11-17T15:30:48,661 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857448634 2024-11-17T15:30:48,661 WARN [IPC Server handler 4 on default port 43551 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857448634 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-17T15:30:48,661 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857448634 after 0ms 2024-11-17T15:30:48,743 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:30:48,745 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:49,560 WARN [ResponseProcessor for block BP-1061960288-172.17.0.2-1731857426049:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1061960288-172.17.0.2-1731857426049:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:49,560 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_660180343_22 at /127.0.0.1:36180 [Receiving block BP-1061960288-172.17.0.2-1731857426049:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:40917:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36180 dst: /127.0.0.1:40917 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:40917 remote=/127.0.0.1:36180]. Total timeout mills is 60000, 59080 millis timeout left. 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:49,560 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_660180343_22 at /127.0.0.1:55880 [Receiving block BP-1061960288-172.17.0.2-1731857426049:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:41335:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55880 dst: /127.0.0.1:41335 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:30:49,560 WARN [DataStreamer for file /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857448634 block BP-1061960288-172.17.0.2-1731857426049:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1061960288-172.17.0.2-1731857426049:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40917,DS-7e13c295-e1da-44cc-b769-460087a89f8d,DISK], DatanodeInfoWithStorage[127.0.0.1:41335,DS-a395277a-0be1-49dc-816c-4f82b4a2dbcc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40917,DS-7e13c295-e1da-44cc-b769-460087a89f8d,DISK]) is bad. 2024-11-17T15:30:49,561 WARN [DataStreamer for file /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857448634 block BP-1061960288-172.17.0.2-1731857426049:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1061960288-172.17.0.2-1731857426049:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:49,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40917 is added to blk_1073741839_1022 (size=85) 2024-11-17T15:30:49,744 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:49,745 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:50,631 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857441294 after 4001ms 2024-11-17T15:30:50,745 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:50,746 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:30:51,745 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:51,746 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:52,662 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857448634 after 4001ms 2024-11-17T15:30:52,662 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857448634 2024-11-17T15:30:52,666 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857448634 2024-11-17T15:30:52,666 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-17T15:30:52,667 ERROR [FSHLog-0-hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc-prefix:7a780d55532c,45235,1731857426896.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46793,DS-a395277a-0be1-49dc-816c-4f82b4a2dbcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:52,667 WARN [FSHLog-0-hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc-prefix:7a780d55532c,45235,1731857426896.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46793,DS-a395277a-0be1-49dc-816c-4f82b4a2dbcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:52,667 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7a780d55532c%2C45235%2C1731857426896.meta:.meta(num 1731857427680) roll requested 2024-11-17T15:30:52,667 INFO [regionserver/7a780d55532c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C45235%2C1731857426896.meta.1731857452667.meta 2024-11-17T15:30:52,672 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:52,672 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:52,672 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:52,672 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:52,672 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:52,673 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.meta.1731857427680.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.meta.1731857452667.meta 2024-11-17T15:30:52,673 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46793,DS-a395277a-0be1-49dc-816c-4f82b4a2dbcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:52,673 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46793,DS-a395277a-0be1-49dc-816c-4f82b4a2dbcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
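[editor's note] The repeated "Failed invocation ... isFileClosed ... Filesystem closed" warnings above come from RecoverLeaseFSUtils polling a DFSClient that has already been shut down, while the "Recovered lease, attempt=1 ... after 4001ms" line shows the same loop succeeding on the live cluster. Below is a minimal sketch of that recover-then-poll pattern written directly against the public DistributedFileSystem API; the class name, helper name, and timeout/poll parameters are illustrative assumptions, not the actual HBase RecoverLeaseFSUtils implementation.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  // Illustrative helper: poll until the NameNode reports the file closed,
  // re-issuing recoverLease() between polls.
  static boolean recoverWalLease(DistributedFileSystem dfs, Path wal,
      long timeoutMs, long pollMs) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    boolean recovered = dfs.recoverLease(wal);   // first attempt ("attempt=0" in the log)
    while (!recovered && System.currentTimeMillis() < deadline) {
      Thread.sleep(pollMs);                      // the log retries roughly once per second
      // isFileClosed() is the call failing above with "Filesystem closed",
      // because that particular DFSClient was already closed with its cluster.
      if (dfs.isFileClosed(wal)) {
        recovered = true;
      } else {
        recovered = dfs.recoverLease(wal);
      }
    }
    return recovered;
  }
}

The recoverLease() call before the poll loop corresponds to the "Recover lease on dfs file ... " / "Recovered lease, attempt=1 ... after 4001ms" pairs seen elsewhere in this run.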
2024-11-17T15:30:52,673 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.meta.1731857427680.meta 2024-11-17T15:30:52,673 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41265:41265),(127.0.0.1/127.0.0.1:46741:46741)] 2024-11-17T15:30:52,673 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.meta.1731857427680.meta is not closed yet, will try archiving it next time 2024-11-17T15:30:52,673 WARN [IPC Server handler 1 on default port 43551 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.meta.1731857427680.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1015 2024-11-17T15:30:52,674 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.meta.1731857427680.meta after 1ms 2024-11-17T15:30:52,688 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/data/hbase/meta/1588230740/.tmp/info/eb1f27c37044470b863e29b4038ecf5f is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731857427835.69a24af8f4caa18cd5e83f4aea481c33./info:regioninfo/1731857428194/Put/seqid=0 2024-11-17T15:30:52,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41335 is added to blk_1073741841_1025 (size=7125) 2024-11-17T15:30:52,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40917 is added to blk_1073741841_1025 (size=7125) 2024-11-17T15:30:52,694 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/data/hbase/meta/1588230740/.tmp/info/eb1f27c37044470b863e29b4038ecf5f 2024-11-17T15:30:52,713 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/data/hbase/meta/1588230740/.tmp/ns/8a997e83c64645cc9b7ade8f6eb60972 is 43, key is default/ns:d/1731857427720/Put/seqid=0 2024-11-17T15:30:52,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40917 is added to blk_1073741842_1026 (size=5153) 2024-11-17T15:30:52,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41335 is added to blk_1073741842_1026 (size=5153) 2024-11-17T15:30:52,718 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/data/hbase/meta/1588230740/.tmp/ns/8a997e83c64645cc9b7ade8f6eb60972 2024-11-17T15:30:52,737 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/data/hbase/meta/1588230740/.tmp/table/cf1e61633c07455bbb7ca2e86f6b332d is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731857428206/Put/seqid=0 2024-11-17T15:30:52,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40917 is added to blk_1073741843_1027 (size=5438) 2024-11-17T15:30:52,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41335 is added to blk_1073741843_1027 (size=5438) 2024-11-17T15:30:52,742 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/data/hbase/meta/1588230740/.tmp/table/cf1e61633c07455bbb7ca2e86f6b332d 2024-11-17T15:30:52,746 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:30:52,747 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:30:52,748 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/data/hbase/meta/1588230740/.tmp/info/eb1f27c37044470b863e29b4038ecf5f as hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/data/hbase/meta/1588230740/info/eb1f27c37044470b863e29b4038ecf5f 2024-11-17T15:30:52,753 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/data/hbase/meta/1588230740/info/eb1f27c37044470b863e29b4038ecf5f, entries=10, sequenceid=11, filesize=7.0 K 2024-11-17T15:30:52,753 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/data/hbase/meta/1588230740/.tmp/ns/8a997e83c64645cc9b7ade8f6eb60972 as hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/data/hbase/meta/1588230740/ns/8a997e83c64645cc9b7ade8f6eb60972 2024-11-17T15:30:52,759 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/data/hbase/meta/1588230740/ns/8a997e83c64645cc9b7ade8f6eb60972, entries=2, sequenceid=11, filesize=5.0 K 2024-11-17T15:30:52,759 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/data/hbase/meta/1588230740/.tmp/table/cf1e61633c07455bbb7ca2e86f6b332d as hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/data/hbase/meta/1588230740/table/cf1e61633c07455bbb7ca2e86f6b332d 2024-11-17T15:30:52,765 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/data/hbase/meta/1588230740/table/cf1e61633c07455bbb7ca2e86f6b332d, entries=2, sequenceid=11, filesize=5.3 K 2024-11-17T15:30:52,766 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 100ms, sequenceid=11, compaction requested=false 2024-11-17T15:30:52,766 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-17T15:30:52,767 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 69a24af8f4caa18cd5e83f4aea481c33 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-17T15:30:52,767 ERROR [FSHLog-0-hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc-prefix:7a780d55532c,45235,1731857426896 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1061960288-172.17.0.2-1731857426049:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T15:30:52,767 WARN [FSHLog-0-hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc-prefix:7a780d55532c,45235,1731857426896 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1061960288-172.17.0.2-1731857426049:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
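[editor's note] The "appendAndSync throws IOException" / "append entry failed" pair above, followed by the roll request just below, is the usual reaction to a dead write pipeline: stop trusting the current writer, roll to a new WAL file, and close the old writer only on a best-effort basis before handing it to lease recovery. A rough sketch of that control flow against the plain Hadoop FileSystem API follows; the class, method names, and structure are illustrative assumptions and are not the actual AbstractFSWAL/FSHLog code.

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class WalRollSketch {
  private final FileSystem fs;
  private FSDataOutputStream writer;

  WalRollSketch(FileSystem fs, Path first) throws IOException {
    this.fs = fs;
    this.writer = fs.create(first);
  }

  // Append + sync; an IOException here is the "appendAndSync throws IOException"
  // event above, and the caller is expected to request a roll in response.
  void append(byte[] entry) throws IOException {
    writer.write(entry);
    writer.hsync();
  }

  // Roll: open the replacement file first, then close the old writer on a
  // best-effort basis. A failed close (the "Failed to write trailer" /
  // "close old writer failed" warnings) is tolerated; the old file would then
  // be handed to lease recovery rather than blocking the roll.
  void roll(Path next) throws IOException {
    FSDataOutputStream fresh = fs.create(next);
    FSDataOutputStream old = writer;
    writer = fresh;
    try {
      old.close();
    } catch (IOException closeFailure) {
      // best effort only; lease recovery on the old path would start from here
    }
  }
}

Opening the replacement file before attempting the old close mirrors the ordering in the log, where the new writer's pipeline is reported before the "close old writer failed" warning.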
2024-11-17T15:30:52,768 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7a780d55532c%2C45235%2C1731857426896:(num 1731857448634) roll requested 2024-11-17T15:30:52,768 INFO [regionserver/7a780d55532c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C45235%2C1731857426896.1731857452768 2024-11-17T15:30:52,772 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857448634 newFile=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857452768 2024-11-17T15:30:52,773 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:52,773 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:52,773 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:52,773 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:52,773 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:52,773 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857448634 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857452768 2024-11-17T15:30:52,773 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1061960288-172.17.0.2-1731857426049:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:52,774 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1061960288-172.17.0.2-1731857426049:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:52,774 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857448634 2024-11-17T15:30:52,774 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857448634 after 0ms 2024-11-17T15:30:52,774 DEBUG [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41265:41265),(127.0.0.1/127.0.0.1:46741:46741)] 2024-11-17T15:30:52,776 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.1731857448634 to hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/oldWALs/7a780d55532c%2C45235%2C1731857426896.1731857448634 2024-11-17T15:30:52,792 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/data/default/TestLogRolling-testLogRollOnPipelineRestart/69a24af8f4caa18cd5e83f4aea481c33/.tmp/info/fedd73aad6ef414a9ed16f729e57d0d3 is 1080, key is row1002/info:/1731857437943/Put/seqid=0 2024-11-17T15:30:52,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40917 is added to blk_1073741845_1029 (size=9270) 2024-11-17T15:30:52,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41335 is added to blk_1073741845_1029 (size=9270) 2024-11-17T15:30:52,797 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/data/default/TestLogRolling-testLogRollOnPipelineRestart/69a24af8f4caa18cd5e83f4aea481c33/.tmp/info/fedd73aad6ef414a9ed16f729e57d0d3 2024-11-17T15:30:52,803 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/data/default/TestLogRolling-testLogRollOnPipelineRestart/69a24af8f4caa18cd5e83f4aea481c33/.tmp/info/fedd73aad6ef414a9ed16f729e57d0d3 as hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/data/default/TestLogRolling-testLogRollOnPipelineRestart/69a24af8f4caa18cd5e83f4aea481c33/info/fedd73aad6ef414a9ed16f729e57d0d3 2024-11-17T15:30:52,808 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/data/default/TestLogRolling-testLogRollOnPipelineRestart/69a24af8f4caa18cd5e83f4aea481c33/info/fedd73aad6ef414a9ed16f729e57d0d3, entries=4, sequenceid=8, filesize=9.1 K 2024-11-17T15:30:52,809 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 69a24af8f4caa18cd5e83f4aea481c33 in 43ms, sequenceid=8, compaction requested=false 2024-11-17T15:30:52,809 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 69a24af8f4caa18cd5e83f4aea481c33: 2024-11-17T15:30:52,814 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-17T15:30:52,814 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-17T15:30:52,815 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T15:30:52,815 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:30:52,815 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:30:52,815 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-17T15:30:52,815 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-17T15:30:52,815 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=737493324, stopped=false 2024-11-17T15:30:52,815 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7a780d55532c,33399,1731857426845 2024-11-17T15:30:52,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45235-0x101268d141f0001, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T15:30:52,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33399-0x101268d141f0000, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T15:30:52,818 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45235-0x101268d141f0001, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:30:52,818 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33399-0x101268d141f0000, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:30:52,818 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T15:30:52,818 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
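The call stack recorded above ends in AbstractTestLogRolling.tearDown invoking HBaseTestingUtil.shutdownMiniCluster, which is what produces the "Shutting down minicluster" and "Cluster shutdown requested" records. A minimal sketch of that tearDown pattern is shown below; the class name, field name, and no-arg construction of HBaseTestingUtil are illustrative assumptions, not copied from the HBase test source.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

public class LogRollTearDownSketch {
  // Assumed no-arg construction; the real test initializes this in its setup phase.
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    // Closes the shared async connection, stops the master and region server,
    // then ZooKeeper and the mini DFS cluster, the sequence visible in the log as
    // "Shutting down minicluster", "Cluster shutdown requested" and "STOPPING region server".
    TEST_UTIL.shutdownMiniCluster();
  }
}
```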
2024-11-17T15:30:52,818 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T15:30:52,818 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:30:52,818 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '7a780d55532c,45235,1731857426896' ***** 2024-11-17T15:30:52,818 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-17T15:30:52,818 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33399-0x101268d141f0000, quorum=127.0.0.1:56215, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T15:30:52,819 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45235-0x101268d141f0001, quorum=127.0.0.1:56215, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T15:30:52,819 INFO [RS:0;7a780d55532c:45235 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-17T15:30:52,819 INFO [RS:0;7a780d55532c:45235 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-17T15:30:52,819 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-17T15:30:52,819 INFO [RS:0;7a780d55532c:45235 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-17T15:30:52,819 INFO [RS:0;7a780d55532c:45235 {}] regionserver.HRegionServer(3091): Received CLOSE for 69a24af8f4caa18cd5e83f4aea481c33 2024-11-17T15:30:52,819 INFO [RS:0;7a780d55532c:45235 {}] regionserver.HRegionServer(959): stopping server 7a780d55532c,45235,1731857426896 2024-11-17T15:30:52,819 INFO [RS:0;7a780d55532c:45235 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T15:30:52,819 INFO [RS:0;7a780d55532c:45235 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7a780d55532c:45235. 2024-11-17T15:30:52,819 DEBUG [RS:0;7a780d55532c:45235 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T15:30:52,819 DEBUG [RS:0;7a780d55532c:45235 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:30:52,820 INFO [RS:0;7a780d55532c:45235 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-17T15:30:52,820 INFO [RS:0;7a780d55532c:45235 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
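The ZKUtil and ZKWatcher records above show a watcher being set on /hbase/running even though the znode no longer exists, and a NodeDeleted event arriving for it when shutdown is requested. The sketch below mirrors that pattern with the plain ZooKeeper client rather than HBase's ZKWatcher; the quorum string matches the one in the log, but the rest of the wiring is illustrative only.

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    Watcher watcher = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && "/hbase/running".equals(event.getPath())) {
        System.out.println("cluster shutdown requested: " + event.getPath());
      }
    };
    ZooKeeper zk = new ZooKeeper("127.0.0.1:56215", 30000, watcher);
    // exists() registers the default watcher even when the znode is absent,
    // which is what "Set watcher on znode that does not yet exist" refers to.
    zk.exists("/hbase/running", true);
    Thread.sleep(60_000); // keep the session alive long enough to observe the event
    zk.close();
  }
}
```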
2024-11-17T15:30:52,820 INFO [RS:0;7a780d55532c:45235 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-17T15:30:52,819 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 69a24af8f4caa18cd5e83f4aea481c33, disabling compactions & flushes 2024-11-17T15:30:52,820 INFO [RS:0;7a780d55532c:45235 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-17T15:30:52,820 INFO [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731857427835.69a24af8f4caa18cd5e83f4aea481c33. 2024-11-17T15:30:52,820 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731857427835.69a24af8f4caa18cd5e83f4aea481c33. 2024-11-17T15:30:52,820 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731857427835.69a24af8f4caa18cd5e83f4aea481c33. after waiting 0 ms 2024-11-17T15:30:52,820 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731857427835.69a24af8f4caa18cd5e83f4aea481c33. 2024-11-17T15:30:52,820 INFO [RS:0;7a780d55532c:45235 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-17T15:30:52,820 DEBUG [RS:0;7a780d55532c:45235 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 69a24af8f4caa18cd5e83f4aea481c33=TestLogRolling-testLogRollOnPipelineRestart,,1731857427835.69a24af8f4caa18cd5e83f4aea481c33.} 2024-11-17T15:30:52,820 DEBUG [RS:0;7a780d55532c:45235 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 69a24af8f4caa18cd5e83f4aea481c33 2024-11-17T15:30:52,820 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T15:30:52,820 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T15:30:52,820 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T15:30:52,820 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T15:30:52,820 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T15:30:52,825 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/data/default/TestLogRolling-testLogRollOnPipelineRestart/69a24af8f4caa18cd5e83f4aea481c33/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-17T15:30:52,826 INFO [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed 
TestLogRolling-testLogRollOnPipelineRestart,,1731857427835.69a24af8f4caa18cd5e83f4aea481c33. 2024-11-17T15:30:52,826 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 69a24af8f4caa18cd5e83f4aea481c33: Waiting for close lock at 1731857452819Running coprocessor pre-close hooks at 1731857452819Disabling compacts and flushes for region at 1731857452819Disabling writes for close at 1731857452820 (+1 ms)Writing region close event to WAL at 1731857452821 (+1 ms)Running coprocessor post-close hooks at 1731857452826 (+5 ms)Closed at 1731857452826 2024-11-17T15:30:52,826 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731857427835.69a24af8f4caa18cd5e83f4aea481c33. 2024-11-17T15:30:52,829 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-17T15:30:52,830 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T15:30:52,830 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T15:30:52,830 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731857452820Running coprocessor pre-close hooks at 1731857452820Disabling compacts and flushes for region at 1731857452820Disabling writes for close at 1731857452820Writing region close event to WAL at 1731857452825 (+5 ms)Running coprocessor post-close hooks at 1731857452829 (+4 ms)Closed at 1731857452830 (+1 ms) 2024-11-17T15:30:52,830 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-17T15:30:53,020 INFO [RS:0;7a780d55532c:45235 {}] regionserver.HRegionServer(976): stopping server 7a780d55532c,45235,1731857426896; all regions closed. 
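The flush recorded earlier in this run (memstore data of ~4.20 KB written to a .tmp HFile and committed under info/ with filesize=9.1 K) is the server-side effect of a flush request. A hedged client-side sketch of triggering such a flush via the public Admin API follows; the table name comes from the log, while the connection setup is illustrative.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the region server to write the in-memory edits to an HFile,
      // which appears in the log as a .tmp file committed under the info/ directory.
      admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"));
    }
  }
}
```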
2024-11-17T15:30:53,021 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:53,021 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:53,021 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:53,021 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:53,021 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:53,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40917 is added to blk_1073741840_1023 (size=825) 2024-11-17T15:30:53,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41335 is added to blk_1073741840_1023 (size=825) 2024-11-17T15:30:53,158 INFO [regionserver/7a780d55532c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T15:30:53,224 INFO [regionserver/7a780d55532c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-17T15:30:53,224 INFO [regionserver/7a780d55532c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-17T15:30:53,746 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:30:53,747 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:54,747 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:54,747 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:30:55,748 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:55,748 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:56,674 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.meta.1731857427680.meta after 4001ms 2024-11-17T15:30:56,675 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/WALs/7a780d55532c,45235,1731857426896/7a780d55532c%2C45235%2C1731857426896.meta.1731857427680.meta to hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/oldWALs/7a780d55532c%2C45235%2C1731857426896.meta.1731857427680.meta 2024-11-17T15:30:56,678 DEBUG [RS:0;7a780d55532c:45235 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/oldWALs 2024-11-17T15:30:56,678 INFO [RS:0;7a780d55532c:45235 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7a780d55532c%2C45235%2C1731857426896.meta:.meta(num 1731857452667) 2024-11-17T15:30:56,678 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:56,678 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:56,678 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:56,678 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:56,678 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:56,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40917 is added to blk_1073741844_1028 (size=1162) 2024-11-17T15:30:56,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41335 is added to blk_1073741844_1028 (size=1162) 2024-11-17T15:30:56,684 DEBUG [RS:0;7a780d55532c:45235 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/oldWALs 2024-11-17T15:30:56,684 INFO [RS:0;7a780d55532c:45235 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7a780d55532c%2C45235%2C1731857426896:(num 1731857452768) 2024-11-17T15:30:56,684 DEBUG [RS:0;7a780d55532c:45235 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:30:56,684 INFO [RS:0;7a780d55532c:45235 {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T15:30:56,684 INFO [RS:0;7a780d55532c:45235 {}] 
hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T15:30:56,685 INFO [RS:0;7a780d55532c:45235 {}] hbase.ChoreService(370): Chore service for: regionserver/7a780d55532c:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-17T15:30:56,685 INFO [RS:0;7a780d55532c:45235 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T15:30:56,685 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-17T15:30:56,685 INFO [RS:0;7a780d55532c:45235 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45235 2024-11-17T15:30:56,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45235-0x101268d141f0001, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7a780d55532c,45235,1731857426896 2024-11-17T15:30:56,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33399-0x101268d141f0000, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T15:30:56,687 INFO [RS:0;7a780d55532c:45235 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T15:30:56,689 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7a780d55532c,45235,1731857426896] 2024-11-17T15:30:56,691 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7a780d55532c,45235,1731857426896 already deleted, retry=false 2024-11-17T15:30:56,691 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7a780d55532c,45235,1731857426896 expired; onlineServers=0 2024-11-17T15:30:56,691 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7a780d55532c,33399,1731857426845' ***** 2024-11-17T15:30:56,691 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-17T15:30:56,691 INFO [M:0;7a780d55532c:33399 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T15:30:56,691 INFO [M:0;7a780d55532c:33399 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T15:30:56,691 DEBUG [M:0;7a780d55532c:33399 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-17T15:30:56,691 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
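The repeated RecoverLeaseFSUtils warnings above (and again further below) come from a recover-then-poll loop: request lease recovery for a WAL file, then periodically check whether the file is closed; the "Filesystem closed" failures occur because isFileClosed() is invoked after the DFS client has been shut down. The sketch below illustrates that loop using only the public DistributedFileSystem calls that appear in the stack traces; the path argument and fixed sleep are illustrative, the real utility uses backoff.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // e.g. one of the hdfs://.../WALs/... paths from the log, passed on the command line
    Path wal = new Path(args[0]);
    FileSystem fs = wal.getFileSystem(conf);
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    boolean closed = dfs.recoverLease(wal);   // ask the NameNode to begin lease recovery
    while (!closed) {
      Thread.sleep(1000);                     // the real utility backs off between checks
      closed = dfs.isFileClosed(wal);         // the call that fails with "Filesystem closed" above
    }
    System.out.println("lease recovered for " + wal);
  }
}
```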
2024-11-17T15:30:56,691 DEBUG [M:0;7a780d55532c:33399 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-17T15:30:56,691 DEBUG [master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.large.0-1731857427076 {}] cleaner.HFileCleaner(306): Exit Thread[master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.large.0-1731857427076,5,FailOnTimeoutGroup] 2024-11-17T15:30:56,691 DEBUG [master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.small.0-1731857427076 {}] cleaner.HFileCleaner(306): Exit Thread[master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.small.0-1731857427076,5,FailOnTimeoutGroup] 2024-11-17T15:30:56,691 INFO [M:0;7a780d55532c:33399 {}] hbase.ChoreService(370): Chore service for: master/7a780d55532c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-17T15:30:56,691 INFO [M:0;7a780d55532c:33399 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T15:30:56,691 DEBUG [M:0;7a780d55532c:33399 {}] master.HMaster(1795): Stopping service threads 2024-11-17T15:30:56,691 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1015: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-17T15:30:56,692 INFO [M:0;7a780d55532c:33399 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-17T15:30:56,692 INFO [M:0;7a780d55532c:33399 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T15:30:56,692 INFO [M:0;7a780d55532c:33399 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-17T15:30:56,692 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
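The chores being cancelled above (CompactionChecker, MemstoreFlusherChore, CompactionThroughputTuner, FlushedSequenceIdFlusher, and so on, each with a name, period, and unit) follow a simple periodic-task lifecycle: schedule at a fixed period, then stop on shutdown. The sketch below uses java.util.concurrent as a generic stand-in rather than HBase's ChoreService API, purely to illustrate that lifecycle.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreLifecycleSketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService chores = Executors.newSingleThreadScheduledExecutor();
    // Rough analogue of "ScheduledChore name=..., period=60000, unit=MILLISECONDS".
    chores.scheduleAtFixedRate(
        () -> System.out.println("compaction check tick"), 0, 60_000, TimeUnit.MILLISECONDS);
    Thread.sleep(5_000);
    // Shutdown mirrors "Chore: ... was stopped" / "Shutdown chores and chore service".
    chores.shutdownNow();
    chores.awaitTermination(10, TimeUnit.SECONDS);
  }
}
```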
2024-11-17T15:30:56,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33399-0x101268d141f0000, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-17T15:30:56,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33399-0x101268d141f0000, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:30:56,694 DEBUG [M:0;7a780d55532c:33399 {}] zookeeper.ZKUtil(347): master:33399-0x101268d141f0000, quorum=127.0.0.1:56215, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-17T15:30:56,694 WARN [M:0;7a780d55532c:33399 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-17T15:30:56,695 INFO [M:0;7a780d55532c:33399 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/.lastflushedseqids 2024-11-17T15:30:56,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41335 is added to blk_1073741846_1030 (size=111) 2024-11-17T15:30:56,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40917 is added to blk_1073741846_1030 (size=111) 2024-11-17T15:30:56,700 INFO [M:0;7a780d55532c:33399 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-17T15:30:56,700 INFO [M:0;7a780d55532c:33399 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-17T15:30:56,701 DEBUG [M:0;7a780d55532c:33399 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T15:30:56,701 INFO [M:0;7a780d55532c:33399 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:30:56,701 DEBUG [M:0;7a780d55532c:33399 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:30:56,701 DEBUG [M:0;7a780d55532c:33399 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T15:30:56,701 DEBUG [M:0;7a780d55532c:33399 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:30:56,701 INFO [M:0;7a780d55532c:33399 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.16 KB heapSize=29.13 KB 2024-11-17T15:30:56,701 ERROR [FSHLog-0-hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData-prefix:7a780d55532c,33399,1731857426845 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46793,DS-a395277a-0be1-49dc-816c-4f82b4a2dbcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:56,701 WARN [FSHLog-0-hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData-prefix:7a780d55532c,33399,1731857426845 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46793,DS-a395277a-0be1-49dc-816c-4f82b4a2dbcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:56,701 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 7a780d55532c%2C33399%2C1731857426845:(num 1731857426989) roll requested 2024-11-17T15:30:56,702 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C33399%2C1731857426845.1731857456702 2024-11-17T15:30:56,706 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:56,707 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:56,707 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:56,707 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:56,707 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:56,707 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/WALs/7a780d55532c,33399,1731857426845/7a780d55532c%2C33399%2C1731857426845.1731857426989 with entries=53, filesize=26.61 KB; new WAL /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/WALs/7a780d55532c,33399,1731857426845/7a780d55532c%2C33399%2C1731857426845.1731857456702 2024-11-17T15:30:56,707 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46793,DS-a395277a-0be1-49dc-816c-4f82b4a2dbcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:56,707 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46793,DS-a395277a-0be1-49dc-816c-4f82b4a2dbcc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T15:30:56,707 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/WALs/7a780d55532c,33399,1731857426845/7a780d55532c%2C33399%2C1731857426845.1731857426989 2024-11-17T15:30:56,708 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41265:41265),(127.0.0.1/127.0.0.1:46741:46741)] 2024-11-17T15:30:56,708 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/WALs/7a780d55532c,33399,1731857426845/7a780d55532c%2C33399%2C1731857426845.1731857426989 is not closed yet, will try archiving it next time 2024-11-17T15:30:56,708 WARN [IPC Server handler 1 on default port 43551 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/WALs/7a780d55532c,33399,1731857426845/7a780d55532c%2C33399%2C1731857426845.1731857426989 has not been closed. Lease recovery is in progress. 
RecoveryId = 1032 for block blk_1073741830_1013 2024-11-17T15:30:56,708 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/WALs/7a780d55532c,33399,1731857426845/7a780d55532c%2C33399%2C1731857426845.1731857426989 after 1ms 2024-11-17T15:30:56,723 DEBUG [M:0;7a780d55532c:33399 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3c067c53fa5d441fabd1e1398f4d6935 is 82, key is hbase:meta,,1/info:regioninfo/1731857427704/Put/seqid=0 2024-11-17T15:30:56,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40917 is added to blk_1073741848_1033 (size=5672) 2024-11-17T15:30:56,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41335 is added to blk_1073741848_1033 (size=5672) 2024-11-17T15:30:56,729 INFO [M:0;7a780d55532c:33399 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3c067c53fa5d441fabd1e1398f4d6935 2024-11-17T15:30:56,748 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:56,748 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:30:56,754 DEBUG [M:0;7a780d55532c:33399 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/94064a8792754c3e80042ad2a9652031 is 777, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731857428210/Put/seqid=0 2024-11-17T15:30:56,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40917 is added to blk_1073741849_1034 (size=6117) 2024-11-17T15:30:56,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41335 is added to blk_1073741849_1034 (size=6117) 2024-11-17T15:30:56,760 INFO [M:0;7a780d55532c:33399 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.56 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/94064a8792754c3e80042ad2a9652031 2024-11-17T15:30:56,778 DEBUG [M:0;7a780d55532c:33399 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e1795644df1c411fafb935cfdf8ae818 is 69, key is 7a780d55532c,45235,1731857426896/rs:state/1731857427140/Put/seqid=0 2024-11-17T15:30:56,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40917 is added to blk_1073741850_1035 (size=5156) 2024-11-17T15:30:56,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41335 is added to blk_1073741850_1035 (size=5156) 2024-11-17T15:30:56,783 INFO [M:0;7a780d55532c:33399 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e1795644df1c411fafb935cfdf8ae818 2024-11-17T15:30:56,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45235-0x101268d141f0001, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T15:30:56,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45235-0x101268d141f0001, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T15:30:56,789 INFO [RS:0;7a780d55532c:45235 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T15:30:56,789 INFO [RS:0;7a780d55532c:45235 {}] regionserver.HRegionServer(1031): Exiting; stopping=7a780d55532c,45235,1731857426896; zookeeper connection closed. 
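The recurring "Failed invocation" warnings above come from RecoverLeaseFSUtils polling DistributedFileSystem#isFileClosed through reflection while waiting for the old WAL's lease to be released; once the DFSClient behind that FileSystem has been shut down, each probe fails with "java.io.IOException: Filesystem closed", wrapped in the InvocationTargetException shown in the traces. A minimal, simplified sketch of that reflective probe (illustrative only, not the actual RecoverLeaseFSUtils code; the class name below is made up):

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class IsFileClosedProbe {
  static boolean isFileClosed(FileSystem fs, Path wal) {
    try {
      // DistributedFileSystem#isFileClosed(Path) is looked up reflectively so the
      // caller also works against FileSystem implementations that lack the method.
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, wal);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      return false; // method not available or not accessible on this FileSystem
    } catch (InvocationTargetException e) {
      // When the underlying DFSClient has already been closed, the call fails with
      // "java.io.IOException: Filesystem closed" (as in the WARN lines above);
      // the recovery loop treats this as "not closed yet" and retries later.
      return false;
    }
  }
}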
2024-11-17T15:30:56,790 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6ae5e010 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6ae5e010 2024-11-17T15:30:56,790 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-17T15:30:56,802 DEBUG [M:0;7a780d55532c:33399 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8a3d42fdea9b4168b97686513c347724 is 52, key is load_balancer_on/state:d/1731857427830/Put/seqid=0 2024-11-17T15:30:56,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40917 is added to blk_1073741851_1036 (size=5056) 2024-11-17T15:30:56,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41335 is added to blk_1073741851_1036 (size=5056) 2024-11-17T15:30:56,808 INFO [M:0;7a780d55532c:33399 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8a3d42fdea9b4168b97686513c347724 2024-11-17T15:30:56,813 DEBUG [M:0;7a780d55532c:33399 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3c067c53fa5d441fabd1e1398f4d6935 as hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3c067c53fa5d441fabd1e1398f4d6935 2024-11-17T15:30:56,818 INFO [M:0;7a780d55532c:33399 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3c067c53fa5d441fabd1e1398f4d6935, entries=8, sequenceid=56, filesize=5.5 K 2024-11-17T15:30:56,819 DEBUG [M:0;7a780d55532c:33399 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/94064a8792754c3e80042ad2a9652031 as hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/94064a8792754c3e80042ad2a9652031 2024-11-17T15:30:56,825 INFO [M:0;7a780d55532c:33399 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/94064a8792754c3e80042ad2a9652031, entries=6, sequenceid=56, filesize=6.0 K 2024-11-17T15:30:56,826 DEBUG [M:0;7a780d55532c:33399 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e1795644df1c411fafb935cfdf8ae818 as hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e1795644df1c411fafb935cfdf8ae818 
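The "HRegionFileSystem(442): Committing .../.tmp/... as ..." lines above record the flushed temporary HFiles being promoted into their column-family directories, which at the filesystem level amounts to a rename out of the region's .tmp area. A minimal sketch of that commit-by-rename step against the plain Hadoop FileSystem API (paths and class name are illustrative; the real HRegionFileSystem code performs additional validation):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class CommitFlushedFile {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());

    // Flush output is first written under the region's .tmp directory...
    Path tmp = new Path("/hbase/MasterData/data/master/store/exampleRegion/.tmp/info/exampleHFile");
    // ...and then moved into the column-family directory so readers only ever
    // see complete files.
    Path dst = new Path("/hbase/MasterData/data/master/store/exampleRegion/info/exampleHFile");

    if (!fs.rename(tmp, dst)) {
      throw new IOException("Failed to commit " + tmp + " as " + dst);
    }
  }
}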
2024-11-17T15:30:56,829 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-17T15:30:56,831 INFO [M:0;7a780d55532c:33399 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e1795644df1c411fafb935cfdf8ae818, entries=1, sequenceid=56, filesize=5.0 K 2024-11-17T15:30:56,832 DEBUG [M:0;7a780d55532c:33399 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8a3d42fdea9b4168b97686513c347724 as hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8a3d42fdea9b4168b97686513c347724 2024-11-17T15:30:56,836 INFO [M:0;7a780d55532c:33399 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8a3d42fdea9b4168b97686513c347724, entries=1, sequenceid=56, filesize=4.9 K 2024-11-17T15:30:56,838 INFO [M:0;7a780d55532c:33399 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 136ms, sequenceid=56, compaction requested=false 2024-11-17T15:30:56,839 INFO [M:0;7a780d55532c:33399 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:30:56,839 DEBUG [M:0;7a780d55532c:33399 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731857456701Disabling compacts and flushes for region at 1731857456701Disabling writes for close at 1731857456701Obtaining lock to block concurrent updates at 1731857456701Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731857456701Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23714, getHeapSize=29768, getOffHeapSize=0, getCellsCount=67 at 1731857456701Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731857456708 (+7 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731857456708Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731857456722 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731857456723 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731857456735 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731857456753 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731857456753Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731857456764 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731857456778 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731857456778Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731857456788 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731857456802 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731857456802Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5849a3ef: reopening flushed file at 1731857456812 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4ecfee4b: reopening flushed file at 1731857456819 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@108b7d52: reopening flushed file at 1731857456825 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@528e1107: reopening flushed file at 1731857456831 (+6 ms)Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 136ms, sequenceid=56, compaction requested=false at 1731857456838 (+7 ms)Writing region close event to WAL at 1731857456839 (+1 ms)Closed at 1731857456839 2024-11-17T15:30:56,840 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:56,840 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:56,840 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:56,840 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:56,840 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:30:56,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40917 is added to blk_1073741847_1031 (size=757) 2024-11-17T15:30:56,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41335 is added to blk_1073741847_1031 (size=757) 2024-11-17T15:30:57,749 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:57,749 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:57,826 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:57,826 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:57,846 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:57,847 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:57,847 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:57,847 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:57,847 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:57,848 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:57,852 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:57,852 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:57,853 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:57,855 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:57,861 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:57,861 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:58,365 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-17T15:30:58,366 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:58,366 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:58,366 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:58,366 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:58,379 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:58,379 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:58,380 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:58,380 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:58,380 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:58,380 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:58,383 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:58,383 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:58,384 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:58,386 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:30:58,749 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:58,749 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:59,691 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-17T15:30:59,750 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:30:59,750 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:31:00,709 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/WALs/7a780d55532c,33399,1731857426845/7a780d55532c%2C33399%2C1731857426845.1731857426989 after 4002ms 2024-11-17T15:31:00,709 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/WALs/7a780d55532c,33399,1731857426845/7a780d55532c%2C33399%2C1731857426845.1731857426989 to hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/oldWALs/7a780d55532c%2C33399%2C1731857426845.1731857426989 2024-11-17T15:31:00,712 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/MasterData/oldWALs/7a780d55532c%2C33399%2C1731857426845.1731857426989 to hdfs://localhost:43551/user/jenkins/test-data/a1dc62c9-4986-c636-2f3e-c99c544103cc/oldWALs/7a780d55532c%2C33399%2C1731857426845.1731857426989$masterlocalwal$ 2024-11-17T15:31:00,712 INFO [M:0;7a780d55532c:33399 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-17T15:31:00,712 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-17T15:31:00,712 INFO [M:0;7a780d55532c:33399 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33399 2024-11-17T15:31:00,712 INFO [M:0;7a780d55532c:33399 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T15:31:00,751 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:00,751 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:31:00,814 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33399-0x101268d141f0000, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T15:31:00,814 INFO [M:0;7a780d55532c:33399 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T15:31:00,814 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33399-0x101268d141f0000, quorum=127.0.0.1:56215, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T15:31:00,816 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@27703b15{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:31:00,817 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@51f33716{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T15:31:00,817 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T15:31:00,817 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@511ae001{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T15:31:00,817 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1146b324{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/hadoop.log.dir/,STOPPED} 2024-11-17T15:31:00,818 WARN [BP-1061960288-172.17.0.2-1731857426049 heartbeating to localhost/127.0.0.1:43551 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T15:31:00,818 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T15:31:00,818 WARN [BP-1061960288-172.17.0.2-1731857426049 heartbeating to localhost/127.0.0.1:43551 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1061960288-172.17.0.2-1731857426049 (Datanode Uuid 12f6a0f3-ac45-417e-acc2-7b19ff10e633) service to localhost/127.0.0.1:43551 2024-11-17T15:31:00,818 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T15:31:00,819 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/cluster_51169b72-0b57-551e-5b79-ff6d39288205/data/data3/current/BP-1061960288-172.17.0.2-1731857426049 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:31:00,819 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/cluster_51169b72-0b57-551e-5b79-ff6d39288205/data/data4/current/BP-1061960288-172.17.0.2-1731857426049 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:31:00,819 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T15:31:00,821 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4bda5c57{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:31:00,821 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@40b8cc2f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T15:31:00,821 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T15:31:00,821 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@22699b58{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T15:31:00,822 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@27361061{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/hadoop.log.dir/,STOPPED} 2024-11-17T15:31:00,823 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
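The "CachingGetSpaceUsed$RefreshThread ... Thread Interrupted waiting to refresh disk information: sleep interrupted" warnings above are the expected side effect of interrupting a background disk-usage refresh loop while the datanode shuts down. A rough sketch of that sleep-then-refresh pattern (illustrative only, not Hadoop's CachingGetSpaceUsed implementation; the interval and refresh body are placeholders):

public final class RefreshLoop implements Runnable {
  private volatile long usedBytes;

  @Override
  public void run() {
    while (!Thread.currentThread().isInterrupted()) {
      try {
        Thread.sleep(60_000L);          // wait for the next refresh interval
        usedBytes = computeUsedBytes(); // re-scan disk usage
      } catch (InterruptedException e) {
        // Shutdown interrupts the sleep; restore the flag and let the thread exit,
        // which is what the WARN lines above correspond to.
        Thread.currentThread().interrupt();
        return;
      }
    }
  }

  private long computeUsedBytes() {
    return 0L; // placeholder for the actual directory scan
  }
}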
2024-11-17T15:31:00,823 WARN [BP-1061960288-172.17.0.2-1731857426049 heartbeating to localhost/127.0.0.1:43551 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T15:31:00,823 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T15:31:00,823 WARN [BP-1061960288-172.17.0.2-1731857426049 heartbeating to localhost/127.0.0.1:43551 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1061960288-172.17.0.2-1731857426049 (Datanode Uuid 447dae23-b803-487e-bf73-7d03e699e225) service to localhost/127.0.0.1:43551 2024-11-17T15:31:00,823 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/cluster_51169b72-0b57-551e-5b79-ff6d39288205/data/data1/current/BP-1061960288-172.17.0.2-1731857426049 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:31:00,824 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/cluster_51169b72-0b57-551e-5b79-ff6d39288205/data/data2/current/BP-1061960288-172.17.0.2-1731857426049 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:31:00,824 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T15:31:00,829 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@40ce61ea{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T15:31:00,829 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3734ddc7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T15:31:00,829 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T15:31:00,829 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35d68916{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T15:31:00,830 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8df6f39{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/hadoop.log.dir/,STOPPED} 2024-11-17T15:31:00,835 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-17T15:31:00,852 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-17T15:31:00,860 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=179 (was 154) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43551 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:43551 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:43551 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:43551 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:43551 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43551 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:43551 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43551 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 452) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=110 (was 188), ProcessCount=11 (was 11), AvailableMemoryMB=3658 (was 3820) 2024-11-17T15:31:00,867 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=179, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=110, ProcessCount=11, AvailableMemoryMB=3659 2024-11-17T15:31:00,868 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-17T15:31:00,868 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/hadoop.log.dir so I do NOT create it in target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2 2024-11-17T15:31:00,868 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/466a8a68-b03c-5395-5402-4e756ae99751/hadoop.tmp.dir so I do NOT create it in target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2 2024-11-17T15:31:00,868 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/cluster_55fdf521-ca31-d682-af57-8e9f6a5a76b0, deleteOnExit=true 2024-11-17T15:31:00,868 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-17T15:31:00,868 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/test.cache.data in system properties and HBase conf 2024-11-17T15:31:00,868 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/hadoop.tmp.dir in system properties and HBase conf 2024-11-17T15:31:00,868 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/hadoop.log.dir in system properties and HBase conf 2024-11-17T15:31:00,868 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-17T15:31:00,868 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-17T15:31:00,868 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-17T15:31:00,868 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file 
system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-17T15:31:00,869 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-17T15:31:00,869 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-17T15:31:00,869 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-17T15:31:00,869 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T15:31:00,869 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-17T15:31:00,869 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-17T15:31:00,869 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T15:31:00,869 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T15:31:00,869 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-17T15:31:00,869 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/nfs.dump.dir in system properties and HBase conf 2024-11-17T15:31:00,869 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/java.io.tmpdir in system properties and HBase conf 2024-11-17T15:31:00,869 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T15:31:00,869 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-17T15:31:00,869 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-17T15:31:00,882 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T15:31:00,950 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:31:00,954 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T15:31:00,955 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T15:31:00,955 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T15:31:00,955 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T15:31:00,956 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:31:00,956 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5050f271{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/hadoop.log.dir/,AVAILABLE} 2024-11-17T15:31:00,956 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@582569f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T15:31:01,071 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@424117a8{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/java.io.tmpdir/jetty-localhost-35637-hadoop-hdfs-3_4_1-tests_jar-_-any-5444463427780588611/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T15:31:01,071 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4af78b23{HTTP/1.1, (http/1.1)}{localhost:35637} 2024-11-17T15:31:01,072 INFO [Time-limited test {}] server.Server(415): Started @184993ms 2024-11-17T15:31:01,084 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T15:31:01,135 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:31:01,137 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T15:31:01,138 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T15:31:01,138 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T15:31:01,138 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T15:31:01,139 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3995deff{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/hadoop.log.dir/,AVAILABLE} 2024-11-17T15:31:01,139 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@325c1ff6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T15:31:01,253 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7fcc5296{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/java.io.tmpdir/jetty-localhost-45413-hadoop-hdfs-3_4_1-tests_jar-_-any-10076933904250833356/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:31:01,253 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5e22a5cd{HTTP/1.1, (http/1.1)}{localhost:45413} 2024-11-17T15:31:01,254 INFO [Time-limited test {}] server.Server(415): Started @185175ms 2024-11-17T15:31:01,255 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T15:31:01,282 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:31:01,284 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T15:31:01,285 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T15:31:01,285 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T15:31:01,285 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T15:31:01,285 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18b44758{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/hadoop.log.dir/,AVAILABLE} 2024-11-17T15:31:01,286 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@15cc44b6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T15:31:01,344 WARN [Thread-1649 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/cluster_55fdf521-ca31-d682-af57-8e9f6a5a76b0/data/data1/current/BP-752870497-172.17.0.2-1731857460897/current, will proceed with Du for space computation calculation, 2024-11-17T15:31:01,344 WARN [Thread-1650 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/cluster_55fdf521-ca31-d682-af57-8e9f6a5a76b0/data/data2/current/BP-752870497-172.17.0.2-1731857460897/current, will proceed with Du for space computation calculation, 2024-11-17T15:31:01,360 WARN [Thread-1628 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T15:31:01,362 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6df1409a5f76e89e with lease ID 0x6a459fd7fad05c4f: Processing first storage report for DS-96be9432-8102-40ae-b045-990beb8ffdde from datanode DatanodeRegistration(127.0.0.1:36147, datanodeUuid=f4ed5083-c823-4527-9187-182928e8dd70, infoPort=38135, infoSecurePort=0, ipcPort=40459, storageInfo=lv=-57;cid=testClusterID;nsid=722135734;c=1731857460897) 2024-11-17T15:31:01,362 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6df1409a5f76e89e with lease ID 0x6a459fd7fad05c4f: from storage DS-96be9432-8102-40ae-b045-990beb8ffdde node DatanodeRegistration(127.0.0.1:36147, datanodeUuid=f4ed5083-c823-4527-9187-182928e8dd70, infoPort=38135, infoSecurePort=0, ipcPort=40459, storageInfo=lv=-57;cid=testClusterID;nsid=722135734;c=1731857460897), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:31:01,362 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6df1409a5f76e89e with lease ID 0x6a459fd7fad05c4f: Processing first storage report for DS-45354906-e99d-4344-b420-1d6a66d10015 from datanode DatanodeRegistration(127.0.0.1:36147, datanodeUuid=f4ed5083-c823-4527-9187-182928e8dd70, infoPort=38135, infoSecurePort=0, ipcPort=40459, storageInfo=lv=-57;cid=testClusterID;nsid=722135734;c=1731857460897) 2024-11-17T15:31:01,362 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6df1409a5f76e89e with lease ID 0x6a459fd7fad05c4f: from storage DS-45354906-e99d-4344-b420-1d6a66d10015 node DatanodeRegistration(127.0.0.1:36147, datanodeUuid=f4ed5083-c823-4527-9187-182928e8dd70, infoPort=38135, infoSecurePort=0, ipcPort=40459, storageInfo=lv=-57;cid=testClusterID;nsid=722135734;c=1731857460897), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:31:01,379 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T15:31:01,379 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-17T15:31:01,379 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-17T15:31:01,379 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-17T15:31:01,402 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@a0e8b8e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/java.io.tmpdir/jetty-localhost-46313-hadoop-hdfs-3_4_1-tests_jar-_-any-2153000497582300572/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:31:01,402 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@26be96d6{HTTP/1.1, (http/1.1)}{localhost:46313} 
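[Editorial note] The startup records above (HBaseTestingUtil, MiniZooKeeperCluster, the two DataNodes and their Jetty web UIs) come from the test harness bringing up a single-master minicluster with the option string logged at the start of this test: numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1. A minimal sketch of what drives that sequence, assuming the branch-3 public testing API (the builder call shape and the try/finally shutdown are illustrative, not copied from the test itself):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Mirrors the option string logged above: 1 master, 1 region server,
        // 2 data nodes, 1 ZooKeeper server.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option);
        try {
          // ... exercise the cluster, e.g. through util.getConnection() ...
        } finally {
          // Shutting the minicluster down is what later triggers the graceful
          // event-loop shutdowns seen in the "Potentially hanging thread" dumps.
          util.shutdownMiniCluster();
        }
      }
    }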
2024-11-17T15:31:01,402 INFO [Time-limited test {}] server.Server(415): Started @185324ms 2024-11-17T15:31:01,403 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T15:31:01,501 WARN [Thread-1675 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/cluster_55fdf521-ca31-d682-af57-8e9f6a5a76b0/data/data3/current/BP-752870497-172.17.0.2-1731857460897/current, will proceed with Du for space computation calculation, 2024-11-17T15:31:01,501 WARN [Thread-1676 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/cluster_55fdf521-ca31-d682-af57-8e9f6a5a76b0/data/data4/current/BP-752870497-172.17.0.2-1731857460897/current, will proceed with Du for space computation calculation, 2024-11-17T15:31:01,517 WARN [Thread-1664 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-17T15:31:01,520 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbd3c22ebd23789f8 with lease ID 0x6a459fd7fad05c50: Processing first storage report for DS-5287ed50-0ca1-4bab-ae63-fb83dd8588cf from datanode DatanodeRegistration(127.0.0.1:34823, datanodeUuid=f3d9b4e4-4bd7-464f-9745-2ae466b3d823, infoPort=41259, infoSecurePort=0, ipcPort=38717, storageInfo=lv=-57;cid=testClusterID;nsid=722135734;c=1731857460897) 2024-11-17T15:31:01,520 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbd3c22ebd23789f8 with lease ID 0x6a459fd7fad05c50: from storage DS-5287ed50-0ca1-4bab-ae63-fb83dd8588cf node DatanodeRegistration(127.0.0.1:34823, datanodeUuid=f3d9b4e4-4bd7-464f-9745-2ae466b3d823, infoPort=41259, infoSecurePort=0, ipcPort=38717, storageInfo=lv=-57;cid=testClusterID;nsid=722135734;c=1731857460897), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:31:01,520 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbd3c22ebd23789f8 with lease ID 0x6a459fd7fad05c50: Processing first storage report for DS-fd532289-625d-4948-8b90-c483f7a39f4b from datanode DatanodeRegistration(127.0.0.1:34823, datanodeUuid=f3d9b4e4-4bd7-464f-9745-2ae466b3d823, infoPort=41259, infoSecurePort=0, ipcPort=38717, storageInfo=lv=-57;cid=testClusterID;nsid=722135734;c=1731857460897) 2024-11-17T15:31:01,520 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbd3c22ebd23789f8 with lease ID 0x6a459fd7fad05c50: from storage DS-fd532289-625d-4948-8b90-c483f7a39f4b node DatanodeRegistration(127.0.0.1:34823, datanodeUuid=f3d9b4e4-4bd7-464f-9745-2ae466b3d823, infoPort=41259, infoSecurePort=0, ipcPort=38717, storageInfo=lv=-57;cid=testClusterID;nsid=722135734;c=1731857460897), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:31:01,526 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2 2024-11-17T15:31:01,528 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, 
dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/cluster_55fdf521-ca31-d682-af57-8e9f6a5a76b0/zookeeper_0, clientPort=63112, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/cluster_55fdf521-ca31-d682-af57-8e9f6a5a76b0/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/cluster_55fdf521-ca31-d682-af57-8e9f6a5a76b0/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-17T15:31:01,529 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63112 2024-11-17T15:31:01,529 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:31:01,530 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:31:01,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741825_1001 (size=7) 2024-11-17T15:31:01,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34823 is added to blk_1073741825_1001 (size=7) 2024-11-17T15:31:01,540 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09 with version=8 2024-11-17T15:31:01,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/hbase-staging 2024-11-17T15:31:01,543 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7a780d55532c:0 server-side Connection retries=45 2024-11-17T15:31:01,543 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T15:31:01,543 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T15:31:01,543 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T15:31:01,543 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T15:31:01,543 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T15:31:01,543 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, 
hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-17T15:31:01,543 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T15:31:01,544 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45217 2024-11-17T15:31:01,546 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45217 connecting to ZooKeeper ensemble=127.0.0.1:63112 2024-11-17T15:31:01,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:452170x0, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T15:31:01,555 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45217-0x101268d9ba70000 connected 2024-11-17T15:31:01,569 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:31:01,571 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:31:01,573 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45217-0x101268d9ba70000, quorum=127.0.0.1:63112, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T15:31:01,573 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09, hbase.cluster.distributed=false 2024-11-17T15:31:01,574 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45217-0x101268d9ba70000, quorum=127.0.0.1:63112, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T15:31:01,575 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45217 2024-11-17T15:31:01,575 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45217 2024-11-17T15:31:01,575 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45217 2024-11-17T15:31:01,575 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45217 2024-11-17T15:31:01,575 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45217 2024-11-17T15:31:01,590 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7a780d55532c:0 server-side Connection retries=45 2024-11-17T15:31:01,590 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T15:31:01,590 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T15:31:01,591 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 
readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T15:31:01,591 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T15:31:01,591 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T15:31:01,591 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-17T15:31:01,591 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T15:31:01,591 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40875 2024-11-17T15:31:01,592 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40875 connecting to ZooKeeper ensemble=127.0.0.1:63112 2024-11-17T15:31:01,593 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:31:01,594 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:31:01,601 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:408750x0, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T15:31:01,601 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:408750x0, quorum=127.0.0.1:63112, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T15:31:01,601 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40875-0x101268d9ba70001 connected 2024-11-17T15:31:01,601 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-17T15:31:01,602 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-17T15:31:01,602 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40875-0x101268d9ba70001, quorum=127.0.0.1:63112, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-17T15:31:01,603 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40875-0x101268d9ba70001, quorum=127.0.0.1:63112, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T15:31:01,604 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40875 2024-11-17T15:31:01,604 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40875 2024-11-17T15:31:01,604 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40875 2024-11-17T15:31:01,604 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, 
numCallQueues=1, port=40875 2024-11-17T15:31:01,604 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40875 2024-11-17T15:31:01,615 DEBUG [M:0;7a780d55532c:45217 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7a780d55532c:45217 2024-11-17T15:31:01,616 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7a780d55532c,45217,1731857461542 2024-11-17T15:31:01,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40875-0x101268d9ba70001, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T15:31:01,618 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45217-0x101268d9ba70000, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T15:31:01,618 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45217-0x101268d9ba70000, quorum=127.0.0.1:63112, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7a780d55532c,45217,1731857461542 2024-11-17T15:31:01,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40875-0x101268d9ba70001, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-17T15:31:01,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45217-0x101268d9ba70000, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:31:01,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40875-0x101268d9ba70001, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:31:01,620 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45217-0x101268d9ba70000, quorum=127.0.0.1:63112, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-17T15:31:01,620 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7a780d55532c,45217,1731857461542 from backup master directory 2024-11-17T15:31:01,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45217-0x101268d9ba70000, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7a780d55532c,45217,1731857461542 2024-11-17T15:31:01,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40875-0x101268d9ba70001, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T15:31:01,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45217-0x101268d9ba70000, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T15:31:01,622 WARN [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
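[Editorial note] At this point the master (port 45217) and region server (port 40875) have bound their NettyRpcServer endpoints and registered with the MiniZooKeeperCluster on clientPort=63112. A client reaches such a cluster purely through the ZooKeeper quorum; a minimal sketch, assuming the standard client API (the quorum address and port are taken from this run's log and would differ for any other run):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClientSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Quorum and client port as logged for the MiniZooKeeperCluster above.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.setInt("hbase.zookeeper.property.clientPort", 63112);
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Reports the active master, i.e. the server registered a few records below.
          System.out.println(admin.getClusterMetrics().getMasterName());
        }
      }
    }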
2024-11-17T15:31:01,622 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7a780d55532c,45217,1731857461542 2024-11-17T15:31:01,625 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/hbase.id] with ID: b78b7a84-d8ff-47cc-b8d2-55b333fd3d5f 2024-11-17T15:31:01,626 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/.tmp/hbase.id 2024-11-17T15:31:01,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34823 is added to blk_1073741826_1002 (size=42) 2024-11-17T15:31:01,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741826_1002 (size=42) 2024-11-17T15:31:01,631 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/.tmp/hbase.id]:[hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/hbase.id] 2024-11-17T15:31:01,641 INFO [master/7a780d55532c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:31:01,642 INFO [master/7a780d55532c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-17T15:31:01,643 INFO [master/7a780d55532c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
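[Editorial note] The two FSUtils records above write the cluster ID to .tmp/hbase.id and then move it to hbase.id. That is the usual create-then-rename pattern for publishing a file atomically on HDFS; a minimal sketch of the general pattern with the Hadoop FileSystem API (this is not the HBase FSUtils implementation; the helper name, paths and error handling are illustrative):

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdWriteSketch {
      static void writeAtomically(FileSystem fs, Path dir, String clusterId) throws Exception {
        Path tmp = new Path(dir, ".tmp/hbase.id");   // temporary location, as in the log
        Path dst = new Path(dir, "hbase.id");        // final location
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }
        // rename is atomic on HDFS, so readers see either no file or a complete one.
        if (!fs.rename(tmp, dst)) {
          throw new IllegalStateException("rename failed: " + tmp + " -> " + dst);
        }
      }
    }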
2024-11-17T15:31:01,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40875-0x101268d9ba70001, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:31:01,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45217-0x101268d9ba70000, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:31:01,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34823 is added to blk_1073741827_1003 (size=196) 2024-11-17T15:31:01,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741827_1003 (size=196) 2024-11-17T15:31:01,652 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T15:31:01,653 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-17T15:31:01,653 INFO [master/7a780d55532c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T15:31:01,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741828_1004 (size=1189) 2024-11-17T15:31:01,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34823 is added to blk_1073741828_1004 (size=1189) 2024-11-17T15:31:01,660 INFO [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/MasterData/data/master/store 2024-11-17T15:31:01,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741829_1005 (size=34) 2024-11-17T15:31:01,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34823 is added to blk_1073741829_1005 (size=34) 2024-11-17T15:31:01,668 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:31:01,668 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T15:31:01,668 INFO [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:31:01,668 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:31:01,668 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T15:31:01,668 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:31:01,668 INFO [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
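[Editorial note] The long descriptor string logged for 'master:store' above (families info/proc/rs/state, with 'info' kept in memory, 3 versions, 8 KB blocks, ROWCOL bloom filter, ROW_INDEX_V1 encoding) maps directly onto the public descriptor builders. A minimal sketch of an equivalent descriptor, assuming a hypothetical table name rather than the internal master region (the internal region is not created through this path):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      static TableDescriptor buildStoreLikeDescriptor() {
        // 'info' family roughly as logged: 3 versions, in-memory, 8 KB blocks,
        // ROWCOL bloom filter, ROW_INDEX_V1 block encoding.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("exampleStoreTable"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .build())
            // 'proc', 'rs' and 'state' use the defaults shown in the log
            // (1 version, 64 KB blocks, ROW bloom filter, no encoding).
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
            .build();
      }
    }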
2024-11-17T15:31:01,668 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731857461668Disabling compacts and flushes for region at 1731857461668Disabling writes for close at 1731857461668Writing region close event to WAL at 1731857461668Closed at 1731857461668 2024-11-17T15:31:01,669 WARN [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/MasterData/data/master/store/.initializing 2024-11-17T15:31:01,669 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/MasterData/WALs/7a780d55532c,45217,1731857461542 2024-11-17T15:31:01,671 INFO [master/7a780d55532c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a780d55532c%2C45217%2C1731857461542, suffix=, logDir=hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/MasterData/WALs/7a780d55532c,45217,1731857461542, archiveDir=hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/MasterData/oldWALs, maxLogs=10 2024-11-17T15:31:01,671 INFO [master/7a780d55532c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C45217%2C1731857461542.1731857461671 2024-11-17T15:31:01,675 INFO [master/7a780d55532c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/MasterData/WALs/7a780d55532c,45217,1731857461542/7a780d55532c%2C45217%2C1731857461542.1731857461671 2024-11-17T15:31:01,676 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41259:41259),(127.0.0.1/127.0.0.1:38135:38135)] 2024-11-17T15:31:01,679 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-17T15:31:01,680 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:31:01,680 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:31:01,680 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:31:01,681 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:31:01,682 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-17T15:31:01,682 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:31:01,683 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:31:01,683 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:31:01,683 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-17T15:31:01,684 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:31:01,684 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T15:31:01,684 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:31:01,685 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-17T15:31:01,685 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:31:01,685 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T15:31:01,685 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:31:01,686 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-17T15:31:01,686 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:31:01,687 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T15:31:01,687 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:31:01,687 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:31:01,688 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:31:01,689 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:31:01,689 DEBUG [master/7a780d55532c:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:31:01,690 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-17T15:31:01,691 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:31:01,692 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T15:31:01,693 INFO [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=770265, jitterRate=-0.02055761218070984}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-17T15:31:01,693 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731857461680Initializing all the Stores at 1731857461681 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857461681Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857461681Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857461681Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857461681Cleaning up temporary data from old regions at 1731857461689 (+8 ms)Region opened successfully at 1731857461693 (+4 ms) 2024-11-17T15:31:01,694 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-17T15:31:01,697 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e63f143, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7a780d55532c/172.17.0.2:0 2024-11-17T15:31:01,697 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-17T15:31:01,698 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-17T15:31:01,698 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-17T15:31:01,698 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-17T15:31:01,698 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-17T15:31:01,698 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-17T15:31:01,698 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-17T15:31:01,700 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-17T15:31:01,701 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45217-0x101268d9ba70000, quorum=127.0.0.1:63112, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-17T15:31:01,702 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-17T15:31:01,703 INFO [master/7a780d55532c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-17T15:31:01,703 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45217-0x101268d9ba70000, quorum=127.0.0.1:63112, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-17T15:31:01,704 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-17T15:31:01,705 INFO [master/7a780d55532c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-17T15:31:01,706 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45217-0x101268d9ba70000, quorum=127.0.0.1:63112, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-17T15:31:01,707 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-17T15:31:01,708 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45217-0x101268d9ba70000, quorum=127.0.0.1:63112, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-17T15:31:01,713 DEBUG 
[master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-17T15:31:01,715 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45217-0x101268d9ba70000, quorum=127.0.0.1:63112, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-17T15:31:01,716 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-17T15:31:01,718 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40875-0x101268d9ba70001, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T15:31:01,718 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45217-0x101268d9ba70000, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T15:31:01,718 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40875-0x101268d9ba70001, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:31:01,718 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45217-0x101268d9ba70000, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:31:01,718 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7a780d55532c,45217,1731857461542, sessionid=0x101268d9ba70000, setting cluster-up flag (Was=false) 2024-11-17T15:31:01,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40875-0x101268d9ba70001, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:31:01,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45217-0x101268d9ba70000, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:31:01,727 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-17T15:31:01,728 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7a780d55532c,45217,1731857461542 2024-11-17T15:31:01,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40875-0x101268d9ba70001, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:31:01,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45217-0x101268d9ba70000, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:31:01,737 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-17T15:31:01,738 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7a780d55532c,45217,1731857461542 2024-11-17T15:31:01,739 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-17T15:31:01,741 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-17T15:31:01,741 INFO [master/7a780d55532c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-17T15:31:01,741 INFO [master/7a780d55532c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-17T15:31:01,741 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7a780d55532c,45217,1731857461542 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-17T15:31:01,742 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7a780d55532c:0, corePoolSize=5, maxPoolSize=5 2024-11-17T15:31:01,742 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7a780d55532c:0, corePoolSize=5, maxPoolSize=5 2024-11-17T15:31:01,742 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7a780d55532c:0, corePoolSize=5, maxPoolSize=5 2024-11-17T15:31:01,742 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7a780d55532c:0, corePoolSize=5, maxPoolSize=5 2024-11-17T15:31:01,742 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7a780d55532c:0, corePoolSize=10, maxPoolSize=10 2024-11-17T15:31:01,742 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:31:01,743 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7a780d55532c:0, corePoolSize=2, maxPoolSize=2 2024-11-17T15:31:01,743 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7a780d55532c:0, corePoolSize=1, 
maxPoolSize=1 2024-11-17T15:31:01,743 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731857491743 2024-11-17T15:31:01,743 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-17T15:31:01,744 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-17T15:31:01,744 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-17T15:31:01,744 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-17T15:31:01,744 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-17T15:31:01,744 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-17T15:31:01,744 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:01,744 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-17T15:31:01,744 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-17T15:31:01,744 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T15:31:01,744 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-17T15:31:01,744 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-17T15:31:01,745 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-17T15:31:01,745 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-17T15:31:01,745 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.large.0-1731857461745,5,FailOnTimeoutGroup] 2024-11-17T15:31:01,746 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.small.0-1731857461745,5,FailOnTimeoutGroup] 2024-11-17T15:31:01,746 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:01,746 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-17T15:31:01,746 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:31:01,746 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:01,746 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:01,746 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-17T15:31:01,751 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:01,751 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:31:01,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741831_1007 (size=1321) 2024-11-17T15:31:01,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34823 is added to blk_1073741831_1007 (size=1321) 2024-11-17T15:31:01,755 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-17T15:31:01,755 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09 2024-11-17T15:31:01,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741832_1008 (size=32) 2024-11-17T15:31:01,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34823 is added to blk_1073741832_1008 (size=32) 2024-11-17T15:31:01,761 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:31:01,763 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T15:31:01,764 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 
1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T15:31:01,764 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:31:01,765 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:31:01,765 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T15:31:01,766 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T15:31:01,766 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:31:01,767 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:31:01,767 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T15:31:01,768 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T15:31:01,768 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:31:01,768 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:31:01,768 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T15:31:01,769 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T15:31:01,769 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:31:01,770 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:31:01,770 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T15:31:01,770 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/hbase/meta/1588230740 2024-11-17T15:31:01,771 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/hbase/meta/1588230740 2024-11-17T15:31:01,772 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T15:31:01,772 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T15:31:01,773 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-17T15:31:01,774 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T15:31:01,776 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T15:31:01,776 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=825876, jitterRate=0.05015590786933899}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T15:31:01,777 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731857461761Initializing all the Stores at 1731857461762 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857461762Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857461762Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857461762Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857461762Cleaning up temporary data from old regions at 1731857461772 (+10 ms)Region opened successfully at 1731857461777 (+5 ms) 2024-11-17T15:31:01,777 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T15:31:01,777 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T15:31:01,777 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T15:31:01,777 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T15:31:01,777 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T15:31:01,778 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T15:31:01,778 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731857461777Disabling compacts and flushes for region at 1731857461777Disabling writes for close at 1731857461777Writing region close 
event to WAL at 1731857461778 (+1 ms)Closed at 1731857461778 2024-11-17T15:31:01,779 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T15:31:01,779 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-17T15:31:01,779 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-17T15:31:01,781 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T15:31:01,782 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-17T15:31:01,806 INFO [RS:0;7a780d55532c:40875 {}] regionserver.HRegionServer(746): ClusterId : b78b7a84-d8ff-47cc-b8d2-55b333fd3d5f 2024-11-17T15:31:01,806 DEBUG [RS:0;7a780d55532c:40875 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-17T15:31:01,809 DEBUG [RS:0;7a780d55532c:40875 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-17T15:31:01,809 DEBUG [RS:0;7a780d55532c:40875 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-17T15:31:01,811 DEBUG [RS:0;7a780d55532c:40875 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-17T15:31:01,811 DEBUG [RS:0;7a780d55532c:40875 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e3a9a06, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7a780d55532c/172.17.0.2:0 2024-11-17T15:31:01,823 DEBUG [RS:0;7a780d55532c:40875 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7a780d55532c:40875 2024-11-17T15:31:01,824 INFO [RS:0;7a780d55532c:40875 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-17T15:31:01,824 INFO [RS:0;7a780d55532c:40875 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-17T15:31:01,824 DEBUG [RS:0;7a780d55532c:40875 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-17T15:31:01,824 INFO [RS:0;7a780d55532c:40875 {}] regionserver.HRegionServer(2659): reportForDuty to master=7a780d55532c,45217,1731857461542 with port=40875, startcode=1731857461590 2024-11-17T15:31:01,825 DEBUG [RS:0;7a780d55532c:40875 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-17T15:31:01,826 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39679, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-17T15:31:01,827 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45217 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7a780d55532c,40875,1731857461590 2024-11-17T15:31:01,827 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45217 {}] master.ServerManager(517): Registering regionserver=7a780d55532c,40875,1731857461590 2024-11-17T15:31:01,829 DEBUG [RS:0;7a780d55532c:40875 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09 2024-11-17T15:31:01,829 DEBUG [RS:0;7a780d55532c:40875 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36851 2024-11-17T15:31:01,829 DEBUG [RS:0;7a780d55532c:40875 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-17T15:31:01,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45217-0x101268d9ba70000, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T15:31:01,831 DEBUG [RS:0;7a780d55532c:40875 {}] zookeeper.ZKUtil(111): regionserver:40875-0x101268d9ba70001, quorum=127.0.0.1:63112, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7a780d55532c,40875,1731857461590 2024-11-17T15:31:01,831 WARN [RS:0;7a780d55532c:40875 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-17T15:31:01,831 INFO [RS:0;7a780d55532c:40875 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T15:31:01,831 DEBUG [RS:0;7a780d55532c:40875 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/WALs/7a780d55532c,40875,1731857461590 2024-11-17T15:31:01,832 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7a780d55532c,40875,1731857461590] 2024-11-17T15:31:01,835 INFO [RS:0;7a780d55532c:40875 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-17T15:31:01,836 INFO [RS:0;7a780d55532c:40875 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-17T15:31:01,836 INFO [RS:0;7a780d55532c:40875 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-17T15:31:01,836 INFO [RS:0;7a780d55532c:40875 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-17T15:31:01,836 INFO [RS:0;7a780d55532c:40875 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-17T15:31:01,837 INFO [RS:0;7a780d55532c:40875 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-17T15:31:01,837 INFO [RS:0;7a780d55532c:40875 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:01,837 DEBUG [RS:0;7a780d55532c:40875 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:31:01,837 DEBUG [RS:0;7a780d55532c:40875 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:31:01,837 DEBUG [RS:0;7a780d55532c:40875 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:31:01,837 DEBUG [RS:0;7a780d55532c:40875 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:31:01,838 DEBUG [RS:0;7a780d55532c:40875 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:31:01,838 DEBUG [RS:0;7a780d55532c:40875 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7a780d55532c:0, corePoolSize=2, maxPoolSize=2 2024-11-17T15:31:01,838 DEBUG [RS:0;7a780d55532c:40875 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:31:01,838 DEBUG [RS:0;7a780d55532c:40875 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:31:01,838 DEBUG [RS:0;7a780d55532c:40875 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:31:01,838 DEBUG [RS:0;7a780d55532c:40875 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:31:01,838 DEBUG [RS:0;7a780d55532c:40875 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:31:01,838 DEBUG [RS:0;7a780d55532c:40875 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:31:01,838 DEBUG [RS:0;7a780d55532c:40875 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7a780d55532c:0, corePoolSize=3, maxPoolSize=3 2024-11-17T15:31:01,838 DEBUG [RS:0;7a780d55532c:40875 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0, corePoolSize=3, maxPoolSize=3 2024-11-17T15:31:01,839 INFO [RS:0;7a780d55532c:40875 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-17T15:31:01,839 INFO [RS:0;7a780d55532c:40875 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:01,839 INFO [RS:0;7a780d55532c:40875 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:01,839 INFO [RS:0;7a780d55532c:40875 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:01,839 INFO [RS:0;7a780d55532c:40875 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:01,839 INFO [RS:0;7a780d55532c:40875 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,40875,1731857461590-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T15:31:01,854 INFO [RS:0;7a780d55532c:40875 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-17T15:31:01,854 INFO [RS:0;7a780d55532c:40875 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,40875,1731857461590-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:01,854 INFO [RS:0;7a780d55532c:40875 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:01,854 INFO [RS:0;7a780d55532c:40875 {}] regionserver.Replication(171): 7a780d55532c,40875,1731857461590 started 2024-11-17T15:31:01,868 INFO [RS:0;7a780d55532c:40875 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:01,869 INFO [RS:0;7a780d55532c:40875 {}] regionserver.HRegionServer(1482): Serving as 7a780d55532c,40875,1731857461590, RpcServer on 7a780d55532c/172.17.0.2:40875, sessionid=0x101268d9ba70001 2024-11-17T15:31:01,869 DEBUG [RS:0;7a780d55532c:40875 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-17T15:31:01,869 DEBUG [RS:0;7a780d55532c:40875 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7a780d55532c,40875,1731857461590 2024-11-17T15:31:01,869 DEBUG [RS:0;7a780d55532c:40875 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a780d55532c,40875,1731857461590' 2024-11-17T15:31:01,869 DEBUG [RS:0;7a780d55532c:40875 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-17T15:31:01,869 DEBUG [RS:0;7a780d55532c:40875 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-17T15:31:01,870 DEBUG [RS:0;7a780d55532c:40875 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-17T15:31:01,870 DEBUG [RS:0;7a780d55532c:40875 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-17T15:31:01,870 DEBUG [RS:0;7a780d55532c:40875 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7a780d55532c,40875,1731857461590 2024-11-17T15:31:01,870 DEBUG [RS:0;7a780d55532c:40875 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a780d55532c,40875,1731857461590' 2024-11-17T15:31:01,870 DEBUG [RS:0;7a780d55532c:40875 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-17T15:31:01,870 DEBUG 
[RS:0;7a780d55532c:40875 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-17T15:31:01,871 DEBUG [RS:0;7a780d55532c:40875 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-17T15:31:01,871 INFO [RS:0;7a780d55532c:40875 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-17T15:31:01,871 INFO [RS:0;7a780d55532c:40875 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-17T15:31:01,932 WARN [7a780d55532c:45217 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-17T15:31:01,973 INFO [RS:0;7a780d55532c:40875 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a780d55532c%2C40875%2C1731857461590, suffix=, logDir=hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/WALs/7a780d55532c,40875,1731857461590, archiveDir=hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/oldWALs, maxLogs=32 2024-11-17T15:31:01,973 INFO [RS:0;7a780d55532c:40875 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C40875%2C1731857461590.1731857461973 2024-11-17T15:31:01,978 INFO [RS:0;7a780d55532c:40875 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/WALs/7a780d55532c,40875,1731857461590/7a780d55532c%2C40875%2C1731857461590.1731857461973 2024-11-17T15:31:01,980 DEBUG [RS:0;7a780d55532c:40875 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38135:38135),(127.0.0.1/127.0.0.1:41259:41259)] 2024-11-17T15:31:02,183 DEBUG [7a780d55532c:45217 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-17T15:31:02,183 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7a780d55532c,40875,1731857461590 2024-11-17T15:31:02,185 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7a780d55532c,40875,1731857461590, state=OPENING 2024-11-17T15:31:02,187 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-17T15:31:02,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40875-0x101268d9ba70001, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:31:02,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45217-0x101268d9ba70000, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:31:02,189 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T15:31:02,189 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7a780d55532c,40875,1731857461590}] 2024-11-17T15:31:02,189 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for 
path /hbase/meta-region-server: CHANGED 2024-11-17T15:31:02,189 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T15:31:02,341 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-17T15:31:02,343 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57999, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-17T15:31:02,347 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-17T15:31:02,347 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T15:31:02,349 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a780d55532c%2C40875%2C1731857461590.meta, suffix=.meta, logDir=hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/WALs/7a780d55532c,40875,1731857461590, archiveDir=hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/oldWALs, maxLogs=32 2024-11-17T15:31:02,349 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C40875%2C1731857461590.meta.1731857462349.meta 2024-11-17T15:31:02,355 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/WALs/7a780d55532c,40875,1731857461590/7a780d55532c%2C40875%2C1731857461590.meta.1731857462349.meta 2024-11-17T15:31:02,356 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38135:38135),(127.0.0.1/127.0.0.1:41259:41259)] 2024-11-17T15:31:02,357 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-17T15:31:02,357 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-17T15:31:02,357 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-17T15:31:02,357 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-17T15:31:02,357 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-17T15:31:02,357 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:31:02,357 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-17T15:31:02,357 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-17T15:31:02,359 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T15:31:02,359 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T15:31:02,359 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:31:02,360 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:31:02,360 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T15:31:02,360 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T15:31:02,360 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:31:02,361 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:31:02,361 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T15:31:02,361 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T15:31:02,361 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:31:02,362 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:31:02,362 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T15:31:02,363 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T15:31:02,363 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:31:02,363 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-17T15:31:02,363 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T15:31:02,364 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/hbase/meta/1588230740 2024-11-17T15:31:02,365 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/hbase/meta/1588230740 2024-11-17T15:31:02,366 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T15:31:02,366 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T15:31:02,366 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-17T15:31:02,367 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T15:31:02,368 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=819559, jitterRate=0.042124003171920776}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T15:31:02,368 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-17T15:31:02,369 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731857462357Writing region info on filesystem at 1731857462357Initializing all the Stores at 1731857462358 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857462358Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857462358Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857462358Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857462358Cleaning up temporary data from old regions at 1731857462366 (+8 ms)Running coprocessor post-open hooks at 1731857462368 (+2 ms)Region opened successfully at 1731857462369 (+1 ms) 2024-11-17T15:31:02,370 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731857462341 2024-11-17T15:31:02,372 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-17T15:31:02,372 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-17T15:31:02,373 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7a780d55532c,40875,1731857461590 2024-11-17T15:31:02,374 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7a780d55532c,40875,1731857461590, state=OPEN 2024-11-17T15:31:02,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40875-0x101268d9ba70001, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T15:31:02,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45217-0x101268d9ba70000, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T15:31:02,380 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7a780d55532c,40875,1731857461590 2024-11-17T15:31:02,380 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T15:31:02,380 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T15:31:02,383 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-17T15:31:02,383 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7a780d55532c,40875,1731857461590 in 191 msec 2024-11-17T15:31:02,385 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-17T15:31:02,386 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 604 msec 2024-11-17T15:31:02,386 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T15:31:02,386 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-17T15:31:02,388 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T15:31:02,388 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7a780d55532c,40875,1731857461590, seqNum=-1] 2024-11-17T15:31:02,388 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T15:31:02,390 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46071, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T15:31:02,395 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 654 msec 2024-11-17T15:31:02,395 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731857462395, completionTime=-1 2024-11-17T15:31:02,395 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-17T15:31:02,395 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-17T15:31:02,397 INFO [master/7a780d55532c:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-17T15:31:02,397 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731857522397 2024-11-17T15:31:02,397 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731857582397 2024-11-17T15:31:02,397 INFO [master/7a780d55532c:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-17T15:31:02,397 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,45217,1731857461542-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:02,397 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,45217,1731857461542-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:02,397 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,45217,1731857461542-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:02,397 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7a780d55532c:45217, period=300000, unit=MILLISECONDS is enabled. 
2024-11-17T15:31:02,397 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:02,397 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:02,399 DEBUG [master/7a780d55532c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-17T15:31:02,400 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.778sec 2024-11-17T15:31:02,400 INFO [master/7a780d55532c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-17T15:31:02,401 INFO [master/7a780d55532c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-17T15:31:02,401 INFO [master/7a780d55532c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-17T15:31:02,401 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-17T15:31:02,401 INFO [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-17T15:31:02,401 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,45217,1731857461542-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T15:31:02,401 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,45217,1731857461542-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-17T15:31:02,403 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-17T15:31:02,403 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-17T15:31:02,403 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,45217,1731857461542-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-17T15:31:02,406 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@578206ed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T15:31:02,406 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7a780d55532c,45217,-1 for getting cluster id 2024-11-17T15:31:02,406 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-17T15:31:02,408 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'b78b7a84-d8ff-47cc-b8d2-55b333fd3d5f' 2024-11-17T15:31:02,408 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-17T15:31:02,409 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "b78b7a84-d8ff-47cc-b8d2-55b333fd3d5f" 2024-11-17T15:31:02,409 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3892c0bd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T15:31:02,409 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7a780d55532c,45217,-1] 2024-11-17T15:31:02,409 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-17T15:31:02,409 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:31:02,410 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49908, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-17T15:31:02,411 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20320e58, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T15:31:02,412 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T15:31:02,413 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7a780d55532c,40875,1731857461590, seqNum=-1] 2024-11-17T15:31:02,413 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T15:31:02,414 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35710, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T15:31:02,415 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7a780d55532c,45217,1731857461542 2024-11-17T15:31:02,415 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:31:02,418 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-17T15:31:02,418 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-17T15:31:02,419 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 7a780d55532c,45217,1731857461542 2024-11-17T15:31:02,419 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4a9663e3 2024-11-17T15:31:02,419 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-17T15:31:02,420 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49912, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-17T15:31:02,420 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45217 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-17T15:31:02,420 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45217 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-17T15:31:02,421 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45217 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T15:31:02,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45217 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-17T15:31:02,423 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-17T15:31:02,423 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:31:02,423 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45217 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-17T15:31:02,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45217 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-17T15:31:02,424 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-17T15:31:02,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741835_1011 (size=405) 2024-11-17T15:31:02,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34823 is added to blk_1073741835_1011 (size=405) 2024-11-17T15:31:02,432 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4191d89a3ed4fa8ba35a39be0d7abcdb, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09 2024-11-17T15:31:02,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34823 is added to blk_1073741836_1012 (size=88) 2024-11-17T15:31:02,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741836_1012 (size=88) 2024-11-17T15:31:02,439 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:31:02,439 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 4191d89a3ed4fa8ba35a39be0d7abcdb, disabling compactions & flushes 2024-11-17T15:31:02,439 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb. 2024-11-17T15:31:02,439 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb. 2024-11-17T15:31:02,439 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb. after waiting 0 ms 2024-11-17T15:31:02,439 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb. 
2024-11-17T15:31:02,439 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb. 2024-11-17T15:31:02,440 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4191d89a3ed4fa8ba35a39be0d7abcdb: Waiting for close lock at 1731857462439Disabling compacts and flushes for region at 1731857462439Disabling writes for close at 1731857462439Writing region close event to WAL at 1731857462439Closed at 1731857462439 2024-11-17T15:31:02,441 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-17T15:31:02,441 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731857462441"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731857462441"}]},"ts":"1731857462441"} 2024-11-17T15:31:02,444 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-17T15:31:02,445 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-17T15:31:02,445 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731857462445"}]},"ts":"1731857462445"} 2024-11-17T15:31:02,447 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-17T15:31:02,447 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=4191d89a3ed4fa8ba35a39be0d7abcdb, ASSIGN}] 2024-11-17T15:31:02,449 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=4191d89a3ed4fa8ba35a39be0d7abcdb, ASSIGN 2024-11-17T15:31:02,450 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=4191d89a3ed4fa8ba35a39be0d7abcdb, ASSIGN; state=OFFLINE, location=7a780d55532c,40875,1731857461590; forceNewPlan=false, retain=false 2024-11-17T15:31:02,601 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4191d89a3ed4fa8ba35a39be0d7abcdb, regionState=OPENING, regionLocation=7a780d55532c,40875,1731857461590 2024-11-17T15:31:02,603 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=4191d89a3ed4fa8ba35a39be0d7abcdb, ASSIGN because future has completed 2024-11-17T15:31:02,604 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4191d89a3ed4fa8ba35a39be0d7abcdb, server=7a780d55532c,40875,1731857461590}] 2024-11-17T15:31:02,752 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:02,752 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:02,760 INFO [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb. 
2024-11-17T15:31:02,760 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 4191d89a3ed4fa8ba35a39be0d7abcdb, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb.', STARTKEY => '', ENDKEY => ''} 2024-11-17T15:31:02,761 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 4191d89a3ed4fa8ba35a39be0d7abcdb 2024-11-17T15:31:02,761 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:31:02,761 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 4191d89a3ed4fa8ba35a39be0d7abcdb 2024-11-17T15:31:02,761 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 4191d89a3ed4fa8ba35a39be0d7abcdb 2024-11-17T15:31:02,762 INFO [StoreOpener-4191d89a3ed4fa8ba35a39be0d7abcdb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 4191d89a3ed4fa8ba35a39be0d7abcdb 2024-11-17T15:31:02,763 INFO [StoreOpener-4191d89a3ed4fa8ba35a39be0d7abcdb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4191d89a3ed4fa8ba35a39be0d7abcdb columnFamilyName info 2024-11-17T15:31:02,763 DEBUG [StoreOpener-4191d89a3ed4fa8ba35a39be0d7abcdb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:31:02,764 INFO [StoreOpener-4191d89a3ed4fa8ba35a39be0d7abcdb-1 {}] regionserver.HStore(327): Store=4191d89a3ed4fa8ba35a39be0d7abcdb/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T15:31:02,764 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 4191d89a3ed4fa8ba35a39be0d7abcdb 2024-11-17T15:31:02,764 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb 2024-11-17T15:31:02,765 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb 2024-11-17T15:31:02,765 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 4191d89a3ed4fa8ba35a39be0d7abcdb 2024-11-17T15:31:02,765 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 4191d89a3ed4fa8ba35a39be0d7abcdb 2024-11-17T15:31:02,767 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 4191d89a3ed4fa8ba35a39be0d7abcdb 2024-11-17T15:31:02,768 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T15:31:02,769 INFO [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 4191d89a3ed4fa8ba35a39be0d7abcdb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=831166, jitterRate=0.05688244104385376}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-17T15:31:02,769 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4191d89a3ed4fa8ba35a39be0d7abcdb 2024-11-17T15:31:02,769 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 4191d89a3ed4fa8ba35a39be0d7abcdb: Running coprocessor pre-open hook at 1731857462761Writing region info on filesystem at 1731857462761Initializing all the Stores at 1731857462762 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857462762Cleaning up temporary data from old regions at 1731857462765 (+3 ms)Running coprocessor post-open hooks at 1731857462769 (+4 ms)Region opened successfully at 1731857462769 2024-11-17T15:31:02,770 INFO [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb., pid=6, masterSystemTime=1731857462756 2024-11-17T15:31:02,773 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task 
for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb. 2024-11-17T15:31:02,773 INFO [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb. 2024-11-17T15:31:02,774 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4191d89a3ed4fa8ba35a39be0d7abcdb, regionState=OPEN, openSeqNum=2, regionLocation=7a780d55532c,40875,1731857461590 2024-11-17T15:31:02,776 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4191d89a3ed4fa8ba35a39be0d7abcdb, server=7a780d55532c,40875,1731857461590 because future has completed 2024-11-17T15:31:02,780 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-17T15:31:02,780 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 4191d89a3ed4fa8ba35a39be0d7abcdb, server=7a780d55532c,40875,1731857461590 in 173 msec 2024-11-17T15:31:02,783 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-17T15:31:02,783 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=4191d89a3ed4fa8ba35a39be0d7abcdb, ASSIGN in 333 msec 2024-11-17T15:31:02,784 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-17T15:31:02,784 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731857462784"}]},"ts":"1731857462784"} 2024-11-17T15:31:02,786 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-17T15:31:02,787 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-17T15:31:02,789 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 366 msec 2024-11-17T15:31:03,752 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:03,752 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:04,753 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:04,753 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:05,754 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:05,754 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:06,754 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:06,754 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:06,881 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-17T15:31:06,882 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:06,882 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:06,883 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:06,883 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:06,883 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:06,883 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:06,901 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:06,902 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:06,902 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:06,902 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:06,903 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:06,903 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:06,908 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:06,908 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:06,909 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:06,912 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:07,755 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:07,755 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:07,835 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-17T15:31:07,835 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-17T15:31:08,756 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:08,756 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:09,756 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:09,756 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:10,757 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:10,757 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:11,379 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-17T15:31:11,379 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-17T15:31:11,380 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T15:31:11,380 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-17T15:31:11,380 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-17T15:31:11,380 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-17T15:31:11,380 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-17T15:31:11,380 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-17T15:31:11,757 WARN 
[Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:11,757 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:12,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45217 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-17T15:31:12,506 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-17T15:31:12,506 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-17T15:31:12,509 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-17T15:31:12,509 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb. 
2024-11-17T15:31:12,515 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb., hostname=7a780d55532c,40875,1731857461590, seqNum=2] 2024-11-17T15:31:12,522 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45217 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-17T15:31:12,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45217 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-17T15:31:12,528 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-17T15:31:12,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45217 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-17T15:31:12,529 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T15:31:12,531 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T15:31:12,691 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40875 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-17T15:31:12,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb. 
2024-11-17T15:31:12,692 INFO [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 4191d89a3ed4fa8ba35a39be0d7abcdb 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-17T15:31:12,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/.tmp/info/fbee1561ecc44cdd961b70557c1b37aa is 1080, key is row0001/info:/1731857472516/Put/seqid=0 2024-11-17T15:31:12,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34823 is added to blk_1073741837_1013 (size=6033) 2024-11-17T15:31:12,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741837_1013 (size=6033) 2024-11-17T15:31:12,714 INFO [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/.tmp/info/fbee1561ecc44cdd961b70557c1b37aa 2024-11-17T15:31:12,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/.tmp/info/fbee1561ecc44cdd961b70557c1b37aa as hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/info/fbee1561ecc44cdd961b70557c1b37aa 2024-11-17T15:31:12,725 INFO [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/info/fbee1561ecc44cdd961b70557c1b37aa, entries=1, sequenceid=5, filesize=5.9 K 2024-11-17T15:31:12,726 INFO [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 4191d89a3ed4fa8ba35a39be0d7abcdb in 34ms, sequenceid=5, compaction requested=false 2024-11-17T15:31:12,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 4191d89a3ed4fa8ba35a39be0d7abcdb: 2024-11-17T15:31:12,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb. 
2024-11-17T15:31:12,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-17T15:31:12,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45217 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-17T15:31:12,734 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-17T15:31:12,734 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 200 msec 2024-11-17T15:31:12,736 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 211 msec 2024-11-17T15:31:12,758 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-17T15:31:12,758 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-17T15:31:13,759 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
2024-11-17T15:31:13,759 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
2024-11-17T15:31:14,759 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
2024-11-17T15:31:14,759 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
2024-11-17T15:31:15,760 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
2024-11-17T15:31:15,760 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
2024-11-17T15:31:16,761 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
2024-11-17T15:31:16,761 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
2024-11-17T15:31:17,761 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
2024-11-17T15:31:17,761 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
2024-11-17T15:31:18,762 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
2024-11-17T15:31:18,762 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
2024-11-17T15:31:19,762 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
2024-11-17T15:31:19,762 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
2024-11-17T15:31:20,763 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
2024-11-17T15:31:20,763 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
2024-11-17T15:31:21,764 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
2024-11-17T15:31:21,764 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
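The warnings above all come from the same WAL close path: AbstractFSWAL.closeWriter asks RecoverLeaseFSUtils to recover the lease on the old WAL file, and RecoverLeaseFSUtils polls DistributedFileSystem.isFileClosed through reflection about once per second; every probe fails here because the DFSClient behind that FileSystem has already been shut down, so each attempt surfaces as an InvocationTargetException caused by "Filesystem closed". The snippet below is a minimal sketch of that recover-then-poll pattern, not the actual RecoverLeaseFSUtils source; the class name LeaseRecoveryProbe, the POLL_INTERVAL_MS constant and the timeout handling are illustrative assumptions, while recoverLease(Path) and isFileClosed(Path) are the real DistributedFileSystem methods named in the stack trace.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoveryProbe {
  // Illustrative poll interval; the log above shows roughly one attempt per second.
  private static final long POLL_INTERVAL_MS = 1000L;

  // Ask the NameNode to start lease recovery, then poll isFileClosed() until it
  // reports true or the deadline passes.
  public static boolean recoverLease(FileSystem fs, Path wal, long timeoutMs)
      throws InterruptedException {
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // nothing to recover on non-HDFS filesystems
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    long deadline = System.currentTimeMillis() + timeoutMs;
    boolean recovered = tryRecoverLease(dfs, wal);
    Method isFileClosed = null;
    try {
      // Looked up reflectively, which is why failures appear above as
      // Method.invoke -> InvocationTargetException.
      isFileClosed = dfs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      // Older clients without isFileClosed(); fall back to recoverLease() alone.
    }
    while (!recovered && System.currentTimeMillis() < deadline) {
      Thread.sleep(POLL_INTERVAL_MS);
      if (isFileClosed != null) {
        try {
          recovered = (Boolean) isFileClosed.invoke(dfs, wal);
        } catch (InvocationTargetException | IllegalAccessException e) {
          // With a closed DFSClient the cause is IOException("Filesystem closed"),
          // which is exactly what the repeated WARN entries above record.
        }
      }
      if (!recovered) {
        recovered = tryRecoverLease(dfs, wal);
      }
    }
    return recovered;
  }

  private static boolean tryRecoverLease(DistributedFileSystem dfs, Path wal) {
    try {
      return dfs.recoverLease(wal);
    } catch (IOException e) {
      return false; // e.g. "Filesystem closed"; retried on the next pass
    }
  }
}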
2024-11-17T15:31:22,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45217 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-17T15:31:22,596 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-17T15:31:22,599 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45217 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-17T15:31:22,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45217 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-17T15:31:22,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45217 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-17T15:31:22,601 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-17T15:31:22,602 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-17T15:31:22,602 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-17T15:31:22,755 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40875 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-11-17T15:31:22,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb.
2024-11-17T15:31:22,756 INFO [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 4191d89a3ed4fa8ba35a39be0d7abcdb 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-17T15:31:22,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/.tmp/info/4567bd7014434a5f925861e11fd8fd32 is 1080, key is row0002/info:/1731857482597/Put/seqid=0
2024-11-17T15:31:22,764 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
2024-11-17T15:31:22,764 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
2024-11-17T15:31:22,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34823 is added to blk_1073741838_1014 (size=6033)
2024-11-17T15:31:22,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741838_1014 (size=6033)
2024-11-17T15:31:22,766 INFO [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/.tmp/info/4567bd7014434a5f925861e11fd8fd32
2024-11-17T15:31:22,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/.tmp/info/4567bd7014434a5f925861e11fd8fd32 as hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/info/4567bd7014434a5f925861e11fd8fd32
2024-11-17T15:31:22,778 INFO [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/info/4567bd7014434a5f925861e11fd8fd32, entries=1, sequenceid=9, filesize=5.9 K
2024-11-17T15:31:22,779 INFO [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 4191d89a3ed4fa8ba35a39be0d7abcdb in 23ms, sequenceid=9, compaction requested=false
2024-11-17T15:31:22,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 4191d89a3ed4fa8ba35a39be0d7abcdb:
2024-11-17T15:31:22,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb.
2024-11-17T15:31:22,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10
2024-11-17T15:31:22,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45217 {}] master.HMaster(4169): Remote procedure done, pid=10
2024-11-17T15:31:22,783 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9
2024-11-17T15:31:22,783 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 179 msec
2024-11-17T15:31:22,786 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 185 msec
2024-11-17T15:31:23,765 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
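The 15:31:22 entries above trace one complete flush cycle: the master receives the client flush request and stores a FlushTableProcedure (pid=9), fans out a FlushRegionProcedure (pid=10) to the region server, the region writes its memstore to a temporary HFile and commits it under info/, and both procedures finish in under 200 msec. For reference, the client side that triggers this sequence is an ordinary Admin.flush() call; the sketch below is illustrative and is not part of the test code that produced this output (the class name and connection setup are assumptions, the table name is taken from the log).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushTableExample {
  public static void main(String[] args) throws Exception {
    // Standard client setup; connection details come from hbase-site.xml on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Admin.flush() returns once the master's FlushTableProcedure and its per-region
      // FlushRegionProcedure children have completed, which is what the
      // "Operation: FLUSH ... completed" line from RawAsyncHBaseAdmin records above.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}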
2024-11-17T15:31:23,765 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
2024-11-17T15:31:24,765 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
2024-11-17T15:31:24,765 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
2024-11-17T15:31:25,766 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
2024-11-17T15:31:25,766 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
2024-11-17T15:31:26,767 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	...
11 more 2024-11-17T15:31:26,767 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:26,767 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 after 68048ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T15:31:26,767 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta after 68035ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
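Editor's note: the warnings above show RecoverLeaseFSUtils retrying lease recovery on two WAL files about once per second and failing each time with "Filesystem closed", because the DFSClient behind the FileSystem has already been shut down. The following is a minimal, hypothetical Java sketch of that retry pattern using the public DistributedFileSystem API (recoverLease / isFileClosed). It is not HBase's actual RecoverLeaseFSUtils implementation (which invokes these methods reflectively); the class name, URI, path, timeout and pause values are illustrative assumptions only.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {

  // Retry loop: ask the NameNode to recover the lease, then poll isFileClosed()
  // roughly once per second until the file is closed or the deadline passes.
  // Once the underlying DFSClient has been closed, both calls throw
  // IOException("Filesystem closed"), which is what the WARN entries above show.
  static boolean recoverLease(DistributedFileSystem dfs, Path wal,
      long timeoutMs, long pauseMs) throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    boolean recovered = dfs.recoverLease(wal);          // attempt=1
    while (!recovered && System.currentTimeMillis() < deadline) {
      Thread.sleep(pauseMs);                            // ~1 s cadence, as in the log
      if (dfs.isFileClosed(wal)) {
        return true;                                    // file already closed on the NameNode
      }
      recovered = dfs.recoverLease(wal);                // attempt=2, 3, ...
    }
    return recovered;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Illustrative URI and path; the real WAL paths appear in the log above.
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf)) {
      if (fs instanceof DistributedFileSystem) {
        recoverLease((DistributedFileSystem) fs, new Path("/wals/example.wal"), 900_000L, 1_000L);
      }
    }
  }
}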
2024-11-17T15:31:27,768 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
2024-11-17T15:31:27,768 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
2024-11-17T15:31:28,768 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
2024-11-17T15:31:28,768 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
2024-11-17T15:31:29,769 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
2024-11-17T15:31:29,769 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
2024-11-17T15:31:30,769 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
2024-11-17T15:31:30,769 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
2024-11-17T15:31:31,525 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
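Editor's note: the DEBUG line above comes from HBaseTestingUtil's FsDatasetAsyncDiskServiceFixer, which probes a private Hadoop field reflectively and treats NoSuchFieldException as an expected, version-dependent outcome (see HBASE-27595). Below is a small, hypothetical sketch of that defensive-reflection pattern; it is not the utility's actual code, and the class and method names are illustrative.

import java.lang.reflect.Field;

public class OptionalFieldProbe {

  // Look up a private field by name; if the field does not exist in this
  // library version, return null instead of failing, since its absence is an
  // expected, version-dependent condition rather than an error.
  static Object readFieldIfPresent(Object target, String fieldName) {
    try {
      Field f = target.getClass().getDeclaredField(fieldName);
      f.setAccessible(true);
      return f.get(target);
    } catch (NoSuchFieldException e) {
      return null;                 // e.g. "threadGroup" no longer present in newer Hadoop
    } catch (IllegalAccessException e) {
      throw new IllegalStateException(e);
    }
  }
}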
2024-11-17T15:31:31,770 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
2024-11-17T15:31:31,770 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
2024-11-17T15:31:32,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45217 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-17T15:31:32,636 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-17T15:31:32,639 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C40875%2C1731857461590.1731857492639
2024-11-17T15:31:32,645 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:31:32,645 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:31:32,645 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:31:32,645 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:31:32,645 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:31:32,645 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/WALs/7a780d55532c,40875,1731857461590/7a780d55532c%2C40875%2C1731857461590.1731857461973 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/WALs/7a780d55532c,40875,1731857461590/7a780d55532c%2C40875%2C1731857461590.1731857492639
2024-11-17T15:31:32,646 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38135:38135),(127.0.0.1/127.0.0.1:41259:41259)]
2024-11-17T15:31:32,646 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/WALs/7a780d55532c,40875,1731857461590/7a780d55532c%2C40875%2C1731857461590.1731857461973 is not closed yet, will try archiving it next time
2024-11-17T15:31:32,647 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45217 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-17T15:31:32,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34823 is added to blk_1073741833_1009 (size=5546)
2024-11-17T15:31:32,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741833_1009 (size=5546)
2024-11-17T15:31:32,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45217 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-17T15:31:32,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45217 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-17T15:31:32,650 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-17T15:31:32,651 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
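Editor's note: the 15:31:32 entries above show a client-requested WAL roll followed by a table flush that the master turns into a FlushTableProcedure (pid=11) with a FlushRegionProcedure child (pid=12). Below is a minimal, hypothetical client-side sketch of issuing those two operations with the HBase Admin API; the server name and table name are taken from the log, while the class name and everything else are illustrative assumptions, not the test's actual code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RollAndFlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Roll the WAL of one region server (server name format: host,port,startcode).
      admin.rollWALWriter(ServerName.valueOf("7a780d55532c,40875,1731857461590"));
      // Ask the master to flush the table; the master runs this as a
      // FlushTableProcedure with per-region FlushRegionProcedure children.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}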
2024-11-17T15:31:32,651 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-17T15:31:32,771 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
2024-11-17T15:31:32,771 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
2024-11-17T15:31:32,804 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40875 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-11-17T15:31:32,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb.
2024-11-17T15:31:32,805 INFO [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 4191d89a3ed4fa8ba35a39be0d7abcdb 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-17T15:31:32,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/.tmp/info/26cb3124f8fa45c28ac26523a6c0071d is 1080, key is row0003/info:/1731857492637/Put/seqid=0
2024-11-17T15:31:32,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34823 is added to blk_1073741840_1016 (size=6033)
2024-11-17T15:31:32,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741840_1016 (size=6033)
2024-11-17T15:31:32,815 INFO [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/.tmp/info/26cb3124f8fa45c28ac26523a6c0071d
2024-11-17T15:31:32,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/.tmp/info/26cb3124f8fa45c28ac26523a6c0071d as hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/info/26cb3124f8fa45c28ac26523a6c0071d
2024-11-17T15:31:32,826 INFO [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/info/26cb3124f8fa45c28ac26523a6c0071d, entries=1, sequenceid=13, filesize=5.9 K
2024-11-17T15:31:32,828 INFO [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 4191d89a3ed4fa8ba35a39be0d7abcdb in 22ms, sequenceid=13, compaction requested=true
2024-11-17T15:31:32,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 4191d89a3ed4fa8ba35a39be0d7abcdb:
2024-11-17T15:31:32,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb.
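Editor's note: the flush above writes the new HFile under the region's .tmp directory first and only afterwards commits it into the info store ("Committing ... as ..."). Below is a tiny, hypothetical sketch of that write-then-rename commit pattern using the plain Hadoop FileSystem API; it is not the HRegionFileSystem implementation, and the class name and paths are illustrative.

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {

  // Write-then-rename commit: the flush output is fully written under a .tmp
  // directory first, then moved into the store directory in a single rename,
  // so a reader never observes a partially written file.
  static void commit(FileSystem fs, Path tmpFile, Path storeFile) throws IOException {
    if (!fs.rename(tmpFile, storeFile)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + storeFile);
    }
  }
}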
2024-11-17T15:31:32,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-11-17T15:31:32,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45217 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-11-17T15:31:32,832 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-11-17T15:31:32,832 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 179 msec
2024-11-17T15:31:32,835 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 186 msec
2024-11-17T15:31:33,771 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
2024-11-17T15:31:33,771 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
2024-11-17T15:31:34,772 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
2024-11-17T15:31:34,772 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
2024-11-17T15:31:35,773 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
2024-11-17T15:31:35,773 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
2024-11-17T15:31:36,773 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-11-17T15:31:36,773 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:37,774 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:37,774 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:31:38,774 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:38,774 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:39,775 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:31:39,775 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:40,775 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:40,775 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:31:41,776 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:41,776 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:42,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45217 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-17T15:31:42,677 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-17T15:31:42,677 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T15:31:42,678 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T15:31:42,678 DEBUG [Time-limited test {}] regionserver.HStore(1541): 4191d89a3ed4fa8ba35a39be0d7abcdb/info is initiating minor compaction (all files) 2024-11-17T15:31:42,678 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-17T15:31:42,678 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:42,678 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 4191d89a3ed4fa8ba35a39be0d7abcdb/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb. 
2024-11-17T15:31:42,678 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/info/fbee1561ecc44cdd961b70557c1b37aa, hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/info/4567bd7014434a5f925861e11fd8fd32, hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/info/26cb3124f8fa45c28ac26523a6c0071d] into tmpdir=hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/.tmp, totalSize=17.7 K 2024-11-17T15:31:42,679 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting fbee1561ecc44cdd961b70557c1b37aa, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731857472516 2024-11-17T15:31:42,679 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 4567bd7014434a5f925861e11fd8fd32, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731857482597 2024-11-17T15:31:42,680 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 26cb3124f8fa45c28ac26523a6c0071d, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731857492637 2024-11-17T15:31:42,690 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 4191d89a3ed4fa8ba35a39be0d7abcdb#info#compaction#47 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T15:31:42,690 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/.tmp/info/03e3454cc92f46c999b9675762e00853 is 1080, key is row0001/info:/1731857472516/Put/seqid=0 2024-11-17T15:31:42,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741841_1017 (size=8296) 2024-11-17T15:31:42,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34823 is added to blk_1073741841_1017 (size=8296) 2024-11-17T15:31:42,702 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/.tmp/info/03e3454cc92f46c999b9675762e00853 as hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/info/03e3454cc92f46c999b9675762e00853 2024-11-17T15:31:42,707 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4191d89a3ed4fa8ba35a39be0d7abcdb/info of 4191d89a3ed4fa8ba35a39be0d7abcdb into 03e3454cc92f46c999b9675762e00853(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T15:31:42,707 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 4191d89a3ed4fa8ba35a39be0d7abcdb: 2024-11-17T15:31:42,710 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C40875%2C1731857461590.1731857502710 2024-11-17T15:31:42,715 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:31:42,715 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:31:42,715 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:31:42,716 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:31:42,716 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:31:42,716 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/WALs/7a780d55532c,40875,1731857461590/7a780d55532c%2C40875%2C1731857461590.1731857492639 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/WALs/7a780d55532c,40875,1731857461590/7a780d55532c%2C40875%2C1731857461590.1731857502710 2024-11-17T15:31:42,717 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38135:38135),(127.0.0.1/127.0.0.1:41259:41259)] 2024-11-17T15:31:42,717 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/WALs/7a780d55532c,40875,1731857461590/7a780d55532c%2C40875%2C1731857461590.1731857492639 is not closed yet, will try archiving it next time 2024-11-17T15:31:42,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34823 is added to blk_1073741839_1015 (size=2520) 2024-11-17T15:31:42,718 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741839_1015 (size=2520) 2024-11-17T15:31:42,718 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/WALs/7a780d55532c,40875,1731857461590/7a780d55532c%2C40875%2C1731857461590.1731857461973 to hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/oldWALs/7a780d55532c%2C40875%2C1731857461590.1731857461973 2024-11-17T15:31:42,719 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45217 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-17T15:31:42,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45217 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-17T15:31:42,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45217 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-17T15:31:42,721 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-17T15:31:42,722 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T15:31:42,722 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T15:31:42,735 INFO [master/7a780d55532c:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-17T15:31:42,735 INFO [master/7a780d55532c:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-17T15:31:42,777 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:42,777 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:31:42,875 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40875 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-17T15:31:42,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb. 2024-11-17T15:31:42,875 INFO [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 4191d89a3ed4fa8ba35a39be0d7abcdb 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-17T15:31:42,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/.tmp/info/e1a5b7195f3f476f96ff60a733059e21 is 1080, key is row0000/info:/1731857502709/Put/seqid=0 2024-11-17T15:31:42,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34823 is added to blk_1073741843_1019 (size=6033) 2024-11-17T15:31:42,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741843_1019 (size=6033) 2024-11-17T15:31:42,885 INFO [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/.tmp/info/e1a5b7195f3f476f96ff60a733059e21 2024-11-17T15:31:42,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/.tmp/info/e1a5b7195f3f476f96ff60a733059e21 as hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/info/e1a5b7195f3f476f96ff60a733059e21 2024-11-17T15:31:42,895 INFO [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/info/e1a5b7195f3f476f96ff60a733059e21, entries=1, sequenceid=18, filesize=5.9 K 2024-11-17T15:31:42,896 INFO [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 4191d89a3ed4fa8ba35a39be0d7abcdb in 21ms, sequenceid=18, compaction requested=false 2024-11-17T15:31:42,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): 
Flush status journal for 4191d89a3ed4fa8ba35a39be0d7abcdb: 2024-11-17T15:31:42,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb. 2024-11-17T15:31:42,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-17T15:31:42,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45217 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-17T15:31:42,901 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-17T15:31:42,901 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 176 msec 2024-11-17T15:31:42,903 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 183 msec 2024-11-17T15:31:43,777 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-17T15:31:43,777 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-17T15:31:44,778 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
2024-11-17T15:31:44,778 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
2024-11-17T15:31:45,778 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
2024-11-17T15:31:45,778 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
2024-11-17T15:31:46,779 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
2024-11-17T15:31:46,779 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
2024-11-17T15:31:47,761 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 4191d89a3ed4fa8ba35a39be0d7abcdb, had cached 0 bytes from a total of 14329
2024-11-17T15:31:47,780 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
2024-11-17T15:31:47,780 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
2024-11-17T15:31:48,780 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
2024-11-17T15:31:48,780 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
2024-11-17T15:31:49,781 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
2024-11-17T15:31:49,781 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
2024-11-17T15:31:50,781 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
2024-11-17T15:31:50,781 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
2024-11-17T15:31:51,782 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
2024-11-17T15:31:51,782 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
2024-11-17T15:31:52,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45217 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-17T15:31:52,756 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-17T15:31:52,758 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C40875%2C1731857461590.1731857512758
2024-11-17T15:31:52,764 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:31:52,764 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:31:52,764 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:31:52,764 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:31:52,764 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:31:52,764 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/WALs/7a780d55532c,40875,1731857461590/7a780d55532c%2C40875%2C1731857461590.1731857502710 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/WALs/7a780d55532c,40875,1731857461590/7a780d55532c%2C40875%2C1731857461590.1731857512758
2024-11-17T15:31:52,765 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38135:38135),(127.0.0.1/127.0.0.1:41259:41259)]
2024-11-17T15:31:52,765 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/WALs/7a780d55532c,40875,1731857461590/7a780d55532c%2C40875%2C1731857461590.1731857502710 is not closed yet, will
try archiving it next time 2024-11-17T15:31:52,765 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/WALs/7a780d55532c,40875,1731857461590/7a780d55532c%2C40875%2C1731857461590.1731857492639 to hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/oldWALs/7a780d55532c%2C40875%2C1731857461590.1731857492639 2024-11-17T15:31:52,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-17T15:31:52,765 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-17T15:31:52,765 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T15:31:52,766 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:31:52,766 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:31:52,766 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-17T15:31:52,766 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-17T15:31:52,766 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1734479598, stopped=false 2024-11-17T15:31:52,766 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7a780d55532c,45217,1731857461542 2024-11-17T15:31:52,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34823 is added to blk_1073741842_1018 (size=2026) 2024-11-17T15:31:52,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741842_1018 (size=2026) 2024-11-17T15:31:52,768 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45217-0x101268d9ba70000, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T15:31:52,768 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45217-0x101268d9ba70000, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:31:52,768 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T15:31:52,768 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-17T15:31:52,768 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T15:31:52,768 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:31:52,768 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:45217-0x101268d9ba70000, quorum=127.0.0.1:63112, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T15:31:52,768 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7a780d55532c,40875,1731857461590' ***** 2024-11-17T15:31:52,769 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-17T15:31:52,769 INFO [RS:0;7a780d55532c:40875 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-17T15:31:52,769 INFO [RS:0;7a780d55532c:40875 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-17T15:31:52,769 INFO [RS:0;7a780d55532c:40875 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-17T15:31:52,769 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-17T15:31:52,769 INFO [RS:0;7a780d55532c:40875 {}] regionserver.HRegionServer(3091): Received CLOSE for 4191d89a3ed4fa8ba35a39be0d7abcdb 2024-11-17T15:31:52,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40875-0x101268d9ba70001, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T15:31:52,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40875-0x101268d9ba70001, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:31:52,769 INFO [RS:0;7a780d55532c:40875 {}] regionserver.HRegionServer(959): stopping server 7a780d55532c,40875,1731857461590 2024-11-17T15:31:52,769 INFO [RS:0;7a780d55532c:40875 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T15:31:52,770 INFO [RS:0;7a780d55532c:40875 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7a780d55532c:40875. 
2024-11-17T15:31:52,770 DEBUG [RS:0;7a780d55532c:40875 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T15:31:52,770 DEBUG [RS:0;7a780d55532c:40875 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:31:52,770 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 4191d89a3ed4fa8ba35a39be0d7abcdb, disabling compactions & flushes 2024-11-17T15:31:52,770 INFO [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb. 2024-11-17T15:31:52,770 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb. 2024-11-17T15:31:52,770 INFO [RS:0;7a780d55532c:40875 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-17T15:31:52,770 INFO [RS:0;7a780d55532c:40875 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-17T15:31:52,770 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb. after waiting 0 ms 2024-11-17T15:31:52,770 INFO [RS:0;7a780d55532c:40875 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-17T15:31:52,770 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb. 
2024-11-17T15:31:52,770 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40875-0x101268d9ba70001, quorum=127.0.0.1:63112, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T15:31:52,770 INFO [RS:0;7a780d55532c:40875 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-17T15:31:52,770 INFO [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 4191d89a3ed4fa8ba35a39be0d7abcdb 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-17T15:31:52,770 INFO [RS:0;7a780d55532c:40875 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-17T15:31:52,770 DEBUG [RS:0;7a780d55532c:40875 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 4191d89a3ed4fa8ba35a39be0d7abcdb=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb.} 2024-11-17T15:31:52,770 DEBUG [RS:0;7a780d55532c:40875 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 4191d89a3ed4fa8ba35a39be0d7abcdb 2024-11-17T15:31:52,770 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T15:31:52,770 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T15:31:52,770 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T15:31:52,770 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T15:31:52,770 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T15:31:52,770 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-17T15:31:52,775 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/.tmp/info/4760fc2c63df4494ac449df9e1a5a71d is 1080, key is row0001/info:/1731857512757/Put/seqid=0 2024-11-17T15:31:52,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34823 is added to blk_1073741845_1021 (size=6033) 2024-11-17T15:31:52,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741845_1021 (size=6033) 2024-11-17T15:31:52,780 INFO [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/.tmp/info/4760fc2c63df4494ac449df9e1a5a71d 2024-11-17T15:31:52,782 WARN 
[Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
2024-11-17T15:31:52,782 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
2024-11-17T15:31:52,785 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/.tmp/info/4760fc2c63df4494ac449df9e1a5a71d as hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/info/4760fc2c63df4494ac449df9e1a5a71d
2024-11-17T15:31:52,789 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/hbase/meta/1588230740/.tmp/info/5c2ae8cdd1f34bf59058b326aa4a5e87 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb./info:regioninfo/1731857462774/Put/seqid=0
2024-11-17T15:31:52,790 INFO [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/info/4760fc2c63df4494ac449df9e1a5a71d, entries=1, sequenceid=22, filesize=5.9 K
2024-11-17T15:31:52,791 INFO [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 4191d89a3ed4fa8ba35a39be0d7abcdb in 21ms, sequenceid=22, compaction requested=true
2024-11-17T15:31:52,791 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/info/fbee1561ecc44cdd961b70557c1b37aa, hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/info/4567bd7014434a5f925861e11fd8fd32,
hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/info/26cb3124f8fa45c28ac26523a6c0071d] to archive 2024-11-17T15:31:52,792 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-17T15:31:52,794 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/info/fbee1561ecc44cdd961b70557c1b37aa to hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/info/fbee1561ecc44cdd961b70557c1b37aa 2024-11-17T15:31:52,795 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/info/4567bd7014434a5f925861e11fd8fd32 to hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/info/4567bd7014434a5f925861e11fd8fd32 2024-11-17T15:31:52,796 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/info/26cb3124f8fa45c28ac26523a6c0071d to hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/info/26cb3124f8fa45c28ac26523a6c0071d 2024-11-17T15:31:52,796 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=7a780d55532c:45217 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-17T15:31:52,797 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [fbee1561ecc44cdd961b70557c1b37aa=6033, 4567bd7014434a5f925861e11fd8fd32=6033, 26cb3124f8fa45c28ac26523a6c0071d=6033] 2024-11-17T15:31:52,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741846_1022 (size=7308) 2024-11-17T15:31:52,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34823 is added to blk_1073741846_1022 (size=7308) 2024-11-17T15:31:52,803 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/hbase/meta/1588230740/.tmp/info/5c2ae8cdd1f34bf59058b326aa4a5e87 2024-11-17T15:31:52,805 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4191d89a3ed4fa8ba35a39be0d7abcdb/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-17T15:31:52,805 INFO [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb. 2024-11-17T15:31:52,806 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 4191d89a3ed4fa8ba35a39be0d7abcdb: Waiting for close lock at 1731857512770Running coprocessor pre-close hooks at 1731857512770Disabling compacts and flushes for region at 1731857512770Disabling writes for close at 1731857512770Obtaining lock to block concurrent updates at 1731857512770Preparing flush snapshotting stores in 4191d89a3ed4fa8ba35a39be0d7abcdb at 1731857512770Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731857512770Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb. 
at 1731857512771 (+1 ms)Flushing 4191d89a3ed4fa8ba35a39be0d7abcdb/info: creating writer at 1731857512771Flushing 4191d89a3ed4fa8ba35a39be0d7abcdb/info: appending metadata at 1731857512775 (+4 ms)Flushing 4191d89a3ed4fa8ba35a39be0d7abcdb/info: closing flushed file at 1731857512775Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4cbd13ad: reopening flushed file at 1731857512785 (+10 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 4191d89a3ed4fa8ba35a39be0d7abcdb in 21ms, sequenceid=22, compaction requested=true at 1731857512791 (+6 ms)Writing region close event to WAL at 1731857512800 (+9 ms)Running coprocessor post-close hooks at 1731857512805 (+5 ms)Closed at 1731857512805 2024-11-17T15:31:52,806 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731857462420.4191d89a3ed4fa8ba35a39be0d7abcdb. 2024-11-17T15:31:52,822 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/hbase/meta/1588230740/.tmp/ns/6983c49c3e234270946264d9cc117ab8 is 43, key is default/ns:d/1731857462390/Put/seqid=0 2024-11-17T15:31:52,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741847_1023 (size=5153) 2024-11-17T15:31:52,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34823 is added to blk_1073741847_1023 (size=5153) 2024-11-17T15:31:52,827 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/hbase/meta/1588230740/.tmp/ns/6983c49c3e234270946264d9cc117ab8 2024-11-17T15:31:52,845 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/hbase/meta/1588230740/.tmp/table/8277f9cbb91f482aab01a4b1dd027a17 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731857462784/Put/seqid=0 2024-11-17T15:31:52,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34823 is added to blk_1073741848_1024 (size=5508) 2024-11-17T15:31:52,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741848_1024 (size=5508) 2024-11-17T15:31:52,850 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/hbase/meta/1588230740/.tmp/table/8277f9cbb91f482aab01a4b1dd027a17 2024-11-17T15:31:52,852 INFO [regionserver/7a780d55532c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-17T15:31:52,852 INFO [regionserver/7a780d55532c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-17T15:31:52,855 DEBUG 
[RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/hbase/meta/1588230740/.tmp/info/5c2ae8cdd1f34bf59058b326aa4a5e87 as hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/hbase/meta/1588230740/info/5c2ae8cdd1f34bf59058b326aa4a5e87 2024-11-17T15:31:52,860 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/hbase/meta/1588230740/info/5c2ae8cdd1f34bf59058b326aa4a5e87, entries=10, sequenceid=11, filesize=7.1 K 2024-11-17T15:31:52,861 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/hbase/meta/1588230740/.tmp/ns/6983c49c3e234270946264d9cc117ab8 as hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/hbase/meta/1588230740/ns/6983c49c3e234270946264d9cc117ab8 2024-11-17T15:31:52,865 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/hbase/meta/1588230740/ns/6983c49c3e234270946264d9cc117ab8, entries=2, sequenceid=11, filesize=5.0 K 2024-11-17T15:31:52,866 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/hbase/meta/1588230740/.tmp/table/8277f9cbb91f482aab01a4b1dd027a17 as hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/hbase/meta/1588230740/table/8277f9cbb91f482aab01a4b1dd027a17 2024-11-17T15:31:52,870 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/hbase/meta/1588230740/table/8277f9cbb91f482aab01a4b1dd027a17, entries=2, sequenceid=11, filesize=5.4 K 2024-11-17T15:31:52,871 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 101ms, sequenceid=11, compaction requested=false 2024-11-17T15:31:52,875 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-17T15:31:52,876 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T15:31:52,876 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T15:31:52,876 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731857512770Running 
coprocessor pre-close hooks at 1731857512770Disabling compacts and flushes for region at 1731857512770Disabling writes for close at 1731857512770Obtaining lock to block concurrent updates at 1731857512770Preparing flush snapshotting stores in 1588230740 at 1731857512770Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731857512771 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731857512771Flushing 1588230740/info: creating writer at 1731857512771Flushing 1588230740/info: appending metadata at 1731857512788 (+17 ms)Flushing 1588230740/info: closing flushed file at 1731857512788Flushing 1588230740/ns: creating writer at 1731857512808 (+20 ms)Flushing 1588230740/ns: appending metadata at 1731857512821 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1731857512821Flushing 1588230740/table: creating writer at 1731857512831 (+10 ms)Flushing 1588230740/table: appending metadata at 1731857512844 (+13 ms)Flushing 1588230740/table: closing flushed file at 1731857512845 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2bc42bbd: reopening flushed file at 1731857512854 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42fe4d2c: reopening flushed file at 1731857512860 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@e2f1ce3: reopening flushed file at 1731857512865 (+5 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 101ms, sequenceid=11, compaction requested=false at 1731857512871 (+6 ms)Writing region close event to WAL at 1731857512872 (+1 ms)Running coprocessor post-close hooks at 1731857512876 (+4 ms)Closed at 1731857512876 2024-11-17T15:31:52,876 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-17T15:31:52,970 INFO [RS:0;7a780d55532c:40875 {}] regionserver.HRegionServer(976): stopping server 7a780d55532c,40875,1731857461590; all regions closed. 
2024-11-17T15:31:52,971 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:31:52,971 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:31:52,971 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:31:52,971 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:31:52,971 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:31:52,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34823 is added to blk_1073741834_1010 (size=3306)
2024-11-17T15:31:52,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741834_1010 (size=3306)
2024-11-17T15:31:52,977 DEBUG [RS:0;7a780d55532c:40875 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/oldWALs
2024-11-17T15:31:52,977 INFO [RS:0;7a780d55532c:40875 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7a780d55532c%2C40875%2C1731857461590.meta:.meta(num 1731857462349)
2024-11-17T15:31:52,977 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:31:52,977 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:31:52,977 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:31:52,977 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:31:52,978 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:31:52,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34823 is added to blk_1073741844_1020 (size=1252)
2024-11-17T15:31:52,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741844_1020 (size=1252)
2024-11-17T15:31:52,982 DEBUG [RS:0;7a780d55532c:40875 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/oldWALs
2024-11-17T15:31:52,982 INFO [RS:0;7a780d55532c:40875 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7a780d55532c%2C40875%2C1731857461590:(num 1731857512758)
2024-11-17T15:31:52,982 DEBUG [RS:0;7a780d55532c:40875 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-17T15:31:52,982 INFO [RS:0;7a780d55532c:40875 {}] regionserver.LeaseManager(133): Closed leases
2024-11-17T15:31:52,982 INFO [RS:0;7a780d55532c:40875 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-17T15:31:52,982 INFO [RS:0;7a780d55532c:40875 {}] hbase.ChoreService(370): Chore service for: regionserver/7a780d55532c:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-11-17T15:31:52,982 INFO [RS:0;7a780d55532c:40875 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-17T15:31:52,982 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-17T15:31:52,983 INFO [RS:0;7a780d55532c:40875 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40875
2024-11-17T15:31:52,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45217-0x101268d9ba70000, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-17T15:31:52,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40875-0x101268d9ba70001, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7a780d55532c,40875,1731857461590
2024-11-17T15:31:52,985 INFO [RS:0;7a780d55532c:40875 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-17T15:31:52,986 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7a780d55532c,40875,1731857461590]
2024-11-17T15:31:52,989 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7a780d55532c,40875,1731857461590 already deleted, retry=false
2024-11-17T15:31:52,989 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7a780d55532c,40875,1731857461590 expired; onlineServers=0
2024-11-17T15:31:52,989 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7a780d55532c,45217,1731857461542' *****
2024-11-17T15:31:52,989 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-11-17T15:31:52,989 INFO [M:0;7a780d55532c:45217 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-17T15:31:52,989 INFO [M:0;7a780d55532c:45217 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-17T15:31:52,989 DEBUG [M:0;7a780d55532c:45217 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-11-17T15:31:52,989 DEBUG [M:0;7a780d55532c:45217 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-17T15:31:52,989 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-11-17T15:31:52,989 DEBUG [master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.large.0-1731857461745 {}] cleaner.HFileCleaner(306): Exit Thread[master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.large.0-1731857461745,5,FailOnTimeoutGroup] 2024-11-17T15:31:52,989 DEBUG [master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.small.0-1731857461745 {}] cleaner.HFileCleaner(306): Exit Thread[master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.small.0-1731857461745,5,FailOnTimeoutGroup] 2024-11-17T15:31:52,989 INFO [M:0;7a780d55532c:45217 {}] hbase.ChoreService(370): Chore service for: master/7a780d55532c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-17T15:31:52,989 INFO [M:0;7a780d55532c:45217 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T15:31:52,989 DEBUG [M:0;7a780d55532c:45217 {}] master.HMaster(1795): Stopping service threads 2024-11-17T15:31:52,989 INFO [M:0;7a780d55532c:45217 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-17T15:31:52,989 INFO [M:0;7a780d55532c:45217 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T15:31:52,990 INFO [M:0;7a780d55532c:45217 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-17T15:31:52,990 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-17T15:31:52,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45217-0x101268d9ba70000, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-17T15:31:52,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45217-0x101268d9ba70000, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:31:52,991 DEBUG [M:0;7a780d55532c:45217 {}] zookeeper.ZKUtil(347): master:45217-0x101268d9ba70000, quorum=127.0.0.1:63112, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-17T15:31:52,991 WARN [M:0;7a780d55532c:45217 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-17T15:31:52,991 INFO [M:0;7a780d55532c:45217 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/.lastflushedseqids 2024-11-17T15:31:52,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34823 is added to blk_1073741849_1025 (size=130) 2024-11-17T15:31:52,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741849_1025 (size=130) 2024-11-17T15:31:52,996 INFO [M:0;7a780d55532c:45217 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-17T15:31:52,996 INFO [M:0;7a780d55532c:45217 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-17T15:31:52,996 DEBUG [M:0;7a780d55532c:45217 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T15:31:52,996 INFO [M:0;7a780d55532c:45217 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:31:52,996 DEBUG [M:0;7a780d55532c:45217 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:31:52,996 DEBUG [M:0;7a780d55532c:45217 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T15:31:52,996 DEBUG [M:0;7a780d55532c:45217 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:31:52,997 INFO [M:0;7a780d55532c:45217 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.54 KB heapSize=54.91 KB 2024-11-17T15:31:53,012 DEBUG [M:0;7a780d55532c:45217 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/06598f513dda491ca2c8da3c1c4b6c95 is 82, key is hbase:meta,,1/info:regioninfo/1731857462373/Put/seqid=0 2024-11-17T15:31:53,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34823 is added to blk_1073741850_1026 (size=5672) 2024-11-17T15:31:53,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741850_1026 (size=5672) 2024-11-17T15:31:53,016 INFO [M:0;7a780d55532c:45217 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/06598f513dda491ca2c8da3c1c4b6c95 2024-11-17T15:31:53,035 DEBUG [M:0;7a780d55532c:45217 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f4278ef387874ae4a974d9fd74546773 is 797, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731857462788/Put/seqid=0 2024-11-17T15:31:53,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34823 is added to blk_1073741851_1027 (size=7818) 2024-11-17T15:31:53,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741851_1027 (size=7818) 2024-11-17T15:31:53,040 INFO [M:0;7a780d55532c:45217 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.94 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f4278ef387874ae4a974d9fd74546773 2024-11-17T15:31:53,044 INFO [M:0;7a780d55532c:45217 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f4278ef387874ae4a974d9fd74546773 2024-11-17T15:31:53,058 DEBUG [M:0;7a780d55532c:45217 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1ed006cbc5504390ad508b89cf7933bd is 69, key is 7a780d55532c,40875,1731857461590/rs:state/1731857461827/Put/seqid=0 
2024-11-17T15:31:53,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741852_1028 (size=5156)
2024-11-17T15:31:53,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34823 is added to blk_1073741852_1028 (size=5156)
2024-11-17T15:31:53,063 INFO [M:0;7a780d55532c:45217 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1ed006cbc5504390ad508b89cf7933bd
2024-11-17T15:31:53,080 DEBUG [M:0;7a780d55532c:45217 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/acd51781e74a4c64955c37fa3cbcbc56 is 52, key is load_balancer_on/state:d/1731857462417/Put/seqid=0
2024-11-17T15:31:53,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34823 is added to blk_1073741853_1029 (size=5056)
2024-11-17T15:31:53,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741853_1029 (size=5056)
2024-11-17T15:31:53,084 INFO [M:0;7a780d55532c:45217 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/acd51781e74a4c64955c37fa3cbcbc56
2024-11-17T15:31:53,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40875-0x101268d9ba70001, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-17T15:31:53,087 INFO [RS:0;7a780d55532c:40875 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-17T15:31:53,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40875-0x101268d9ba70001, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-17T15:31:53,087 INFO [RS:0;7a780d55532c:40875 {}] regionserver.HRegionServer(1031): Exiting; stopping=7a780d55532c,40875,1731857461590; zookeeper connection closed.
2024-11-17T15:31:53,087 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@63369f59 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@63369f59 2024-11-17T15:31:53,087 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-17T15:31:53,089 DEBUG [M:0;7a780d55532c:45217 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/06598f513dda491ca2c8da3c1c4b6c95 as hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/06598f513dda491ca2c8da3c1c4b6c95 2024-11-17T15:31:53,093 INFO [M:0;7a780d55532c:45217 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/06598f513dda491ca2c8da3c1c4b6c95, entries=8, sequenceid=121, filesize=5.5 K 2024-11-17T15:31:53,094 DEBUG [M:0;7a780d55532c:45217 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f4278ef387874ae4a974d9fd74546773 as hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f4278ef387874ae4a974d9fd74546773 2024-11-17T15:31:53,097 INFO [M:0;7a780d55532c:45217 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f4278ef387874ae4a974d9fd74546773 2024-11-17T15:31:53,097 INFO [M:0;7a780d55532c:45217 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f4278ef387874ae4a974d9fd74546773, entries=14, sequenceid=121, filesize=7.6 K 2024-11-17T15:31:53,098 DEBUG [M:0;7a780d55532c:45217 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1ed006cbc5504390ad508b89cf7933bd as hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1ed006cbc5504390ad508b89cf7933bd 2024-11-17T15:31:53,102 INFO [M:0;7a780d55532c:45217 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1ed006cbc5504390ad508b89cf7933bd, entries=1, sequenceid=121, filesize=5.0 K 2024-11-17T15:31:53,103 DEBUG [M:0;7a780d55532c:45217 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/acd51781e74a4c64955c37fa3cbcbc56 as hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/acd51781e74a4c64955c37fa3cbcbc56 2024-11-17T15:31:53,107 INFO [M:0;7a780d55532c:45217 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36851/user/jenkins/test-data/d5b5579b-3912-7aa8-25a3-602875448e09/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/acd51781e74a4c64955c37fa3cbcbc56, entries=1, sequenceid=121, filesize=4.9 K 2024-11-17T15:31:53,107 INFO [M:0;7a780d55532c:45217 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.54 KB/44590, heapSize ~54.85 KB/56168, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 111ms, sequenceid=121, compaction requested=false 2024-11-17T15:31:53,109 INFO [M:0;7a780d55532c:45217 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:31:53,109 DEBUG [M:0;7a780d55532c:45217 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731857512996Disabling compacts and flushes for region at 1731857512996Disabling writes for close at 1731857512996Obtaining lock to block concurrent updates at 1731857512997 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731857512997Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44590, getHeapSize=56168, getOffHeapSize=0, getCellsCount=140 at 1731857512997Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731857512998 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731857512998Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731857513011 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731857513011Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731857513021 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731857513035 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731857513035Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731857513044 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731857513057 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731857513057Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731857513067 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731857513080 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731857513080Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@75f33485: reopening flushed file at 1731857513088 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@385d6cf9: reopening flushed file at 1731857513093 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7415d2dc: reopening flushed file at 1731857513098 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f5040af: reopening flushed file at 1731857513102 (+4 ms)Finished flush of dataSize ~43.54 KB/44590, heapSize ~54.85 KB/56168, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 111ms, sequenceid=121, compaction requested=false at 1731857513108 (+6 ms)Writing region close event to WAL at 1731857513109 (+1 ms)Closed at 1731857513109 2024-11-17T15:31:53,109 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:31:53,109 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:31:53,109 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:31:53,110 INFO [sync.3 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:31:53,110 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:31:53,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36147 is added to blk_1073741830_1006 (size=52987) 2024-11-17T15:31:53,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34823 is added to blk_1073741830_1006 (size=52987) 2024-11-17T15:31:53,112 INFO [M:0;7a780d55532c:45217 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-17T15:31:53,112 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-17T15:31:53,112 INFO [M:0;7a780d55532c:45217 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45217 2024-11-17T15:31:53,112 INFO [M:0;7a780d55532c:45217 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T15:31:53,214 INFO [M:0;7a780d55532c:45217 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T15:31:53,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45217-0x101268d9ba70000, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T15:31:53,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45217-0x101268d9ba70000, quorum=127.0.0.1:63112, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T15:31:53,217 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@a0e8b8e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:31:53,217 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@26be96d6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T15:31:53,217 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T15:31:53,217 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@15cc44b6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T15:31:53,218 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18b44758{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/hadoop.log.dir/,STOPPED} 2024-11-17T15:31:53,219 WARN [BP-752870497-172.17.0.2-1731857460897 heartbeating to localhost/127.0.0.1:36851 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T15:31:53,219 WARN [BP-752870497-172.17.0.2-1731857460897 heartbeating to localhost/127.0.0.1:36851 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-752870497-172.17.0.2-1731857460897 (Datanode Uuid f3d9b4e4-4bd7-464f-9745-2ae466b3d823) service to localhost/127.0.0.1:36851 2024-11-17T15:31:53,219 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T15:31:53,219 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-17T15:31:53,220 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/cluster_55fdf521-ca31-d682-af57-8e9f6a5a76b0/data/data3/current/BP-752870497-172.17.0.2-1731857460897 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-17T15:31:53,220 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/cluster_55fdf521-ca31-d682-af57-8e9f6a5a76b0/data/data4/current/BP-752870497-172.17.0.2-1731857460897 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-17T15:31:53,220 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-17T15:31:53,222 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7fcc5296{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-17T15:31:53,222 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5e22a5cd{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-17T15:31:53,222 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-17T15:31:53,222 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@325c1ff6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-17T15:31:53,222 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3995deff{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/hadoop.log.dir/,STOPPED}
2024-11-17T15:31:53,224 WARN [BP-752870497-172.17.0.2-1731857460897 heartbeating to localhost/127.0.0.1:36851 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-17T15:31:53,224 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-17T15:31:53,224 WARN [BP-752870497-172.17.0.2-1731857460897 heartbeating to localhost/127.0.0.1:36851 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-752870497-172.17.0.2-1731857460897 (Datanode Uuid f4ed5083-c823-4527-9187-182928e8dd70) service to localhost/127.0.0.1:36851 2024-11-17T15:31:53,224 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T15:31:53,224 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/cluster_55fdf521-ca31-d682-af57-8e9f6a5a76b0/data/data1/current/BP-752870497-172.17.0.2-1731857460897 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:31:53,225 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/cluster_55fdf521-ca31-d682-af57-8e9f6a5a76b0/data/data2/current/BP-752870497-172.17.0.2-1731857460897 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:31:53,225 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T15:31:53,231 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@424117a8{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T15:31:53,232 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4af78b23{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T15:31:53,232 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T15:31:53,232 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@582569f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T15:31:53,232 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5050f271{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/hadoop.log.dir/,STOPPED} 2024-11-17T15:31:53,238 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-17T15:31:53,254 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-17T15:31:53,263 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=205 (was 179) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:36851 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36851 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36851 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/7a780d55532c:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:36851 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:36851 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:36851 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36851 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36851 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=47 (was 110), ProcessCount=11 (was 11), AvailableMemoryMB=3612 (was 3659) 2024-11-17T15:31:53,270 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=205, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=47, ProcessCount=11, AvailableMemoryMB=3612 2024-11-17T15:31:53,271 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-17T15:31:53,271 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/hadoop.log.dir so I do NOT create it in target/test-data/843f9c9a-3e64-b471-4a0a-660356212342 2024-11-17T15:31:53,271 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/254de44f-24e8-c7e8-d625-90b60c4d62b2/hadoop.tmp.dir so I do NOT create it in target/test-data/843f9c9a-3e64-b471-4a0a-660356212342 2024-11-17T15:31:53,271 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/cluster_fea5238e-7690-69da-f68b-7e85fb014ef5, deleteOnExit=true 2024-11-17T15:31:53,271 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-17T15:31:53,271 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/test.cache.data in system properties and HBase conf 2024-11-17T15:31:53,271 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/hadoop.tmp.dir in system properties and HBase conf 2024-11-17T15:31:53,271 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/hadoop.log.dir in system properties and HBase conf 2024-11-17T15:31:53,271 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-17T15:31:53,271 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-17T15:31:53,271 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-17T15:31:53,271 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-17T15:31:53,272 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-17T15:31:53,272 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-17T15:31:53,272 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-17T15:31:53,272 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T15:31:53,272 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-17T15:31:53,272 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-17T15:31:53,272 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T15:31:53,272 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T15:31:53,272 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-17T15:31:53,272 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/nfs.dump.dir in system properties and HBase conf 2024-11-17T15:31:53,272 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/java.io.tmpdir in system properties and HBase conf 2024-11-17T15:31:53,272 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T15:31:53,272 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-17T15:31:53,272 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-17T15:31:53,285 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T15:31:53,348 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:31:53,353 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T15:31:53,354 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T15:31:53,354 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T15:31:53,354 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T15:31:53,355 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:31:53,355 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d2d8db5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/hadoop.log.dir/,AVAILABLE} 2024-11-17T15:31:53,356 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30d49282{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T15:31:53,469 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4b216a06{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/java.io.tmpdir/jetty-localhost-33539-hadoop-hdfs-3_4_1-tests_jar-_-any-18180198895250934622/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T15:31:53,470 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@732e2119{HTTP/1.1, (http/1.1)}{localhost:33539} 2024-11-17T15:31:53,470 INFO [Time-limited test {}] server.Server(415): Started @237391ms 2024-11-17T15:31:53,482 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T15:31:53,583 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:31:53,587 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T15:31:53,588 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T15:31:53,588 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T15:31:53,588 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T15:31:53,595 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@79974a7c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/hadoop.log.dir/,AVAILABLE} 2024-11-17T15:31:53,595 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69291528{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T15:31:53,713 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2ea3c6d5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/java.io.tmpdir/jetty-localhost-43859-hadoop-hdfs-3_4_1-tests_jar-_-any-5805543592332242074/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:31:53,714 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@624c2d5a{HTTP/1.1, (http/1.1)}{localhost:43859} 2024-11-17T15:31:53,714 INFO [Time-limited test {}] server.Server(415): Started @237636ms 2024-11-17T15:31:53,715 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T15:31:53,746 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:31:53,749 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T15:31:53,750 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T15:31:53,750 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T15:31:53,750 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T15:31:53,750 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3af484fb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/hadoop.log.dir/,AVAILABLE} 2024-11-17T15:31:53,750 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c08daf8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T15:31:53,783 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:31:53,783 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:53,814 WARN [Thread-1965 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/cluster_fea5238e-7690-69da-f68b-7e85fb014ef5/data/data1/current/BP-171420599-172.17.0.2-1731857513292/current, will proceed with Du for space computation calculation, 2024-11-17T15:31:53,814 WARN [Thread-1966 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/cluster_fea5238e-7690-69da-f68b-7e85fb014ef5/data/data2/current/BP-171420599-172.17.0.2-1731857513292/current, will proceed with Du for space computation calculation, 2024-11-17T15:31:53,831 WARN [Thread-1944 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T15:31:53,833 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3b6c0d4eb59216eb with lease ID 0xe5ea923abf1a7be5: Processing first storage report for DS-3cd78747-8431-4c3c-a06b-1b5f03334338 from datanode DatanodeRegistration(127.0.0.1:35763, datanodeUuid=e2e49079-5c86-487f-97b6-e6ab5af79260, infoPort=41199, infoSecurePort=0, ipcPort=37099, storageInfo=lv=-57;cid=testClusterID;nsid=521578460;c=1731857513292) 2024-11-17T15:31:53,833 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3b6c0d4eb59216eb with lease ID 0xe5ea923abf1a7be5: from storage DS-3cd78747-8431-4c3c-a06b-1b5f03334338 node DatanodeRegistration(127.0.0.1:35763, datanodeUuid=e2e49079-5c86-487f-97b6-e6ab5af79260, infoPort=41199, infoSecurePort=0, ipcPort=37099, storageInfo=lv=-57;cid=testClusterID;nsid=521578460;c=1731857513292), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:31:53,833 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3b6c0d4eb59216eb with lease ID 0xe5ea923abf1a7be5: Processing first storage report for DS-bc88f52c-1d90-47d8-85f7-ed7c2c234e17 from datanode DatanodeRegistration(127.0.0.1:35763, datanodeUuid=e2e49079-5c86-487f-97b6-e6ab5af79260, infoPort=41199, infoSecurePort=0, ipcPort=37099, storageInfo=lv=-57;cid=testClusterID;nsid=521578460;c=1731857513292) 2024-11-17T15:31:53,833 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3b6c0d4eb59216eb with lease ID 0xe5ea923abf1a7be5: from storage DS-bc88f52c-1d90-47d8-85f7-ed7c2c234e17 node DatanodeRegistration(127.0.0.1:35763, datanodeUuid=e2e49079-5c86-487f-97b6-e6ab5af79260, infoPort=41199, infoSecurePort=0, ipcPort=37099, storageInfo=lv=-57;cid=testClusterID;nsid=521578460;c=1731857513292), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:31:53,841 INFO [regionserver/7a780d55532c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T15:31:53,866 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@24a90bdd{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/java.io.tmpdir/jetty-localhost-43175-hadoop-hdfs-3_4_1-tests_jar-_-any-4879740855289321354/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:31:53,866 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@506744b5{HTTP/1.1, (http/1.1)}{localhost:43175} 2024-11-17T15:31:53,866 INFO [Time-limited test {}] server.Server(415): Started @237788ms 2024-11-17T15:31:53,867 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
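
The RecoverLeaseFSUtils stack traces above show DistributedFileSystem#isFileClosed being called through java.lang.reflect.Method.invoke, which is why the underlying "Filesystem closed" IOException only appears as the cause of an InvocationTargetException ("InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed"). A minimal, self-contained Java sketch of just that wrapping behaviour follows; FakeFs and its method are hypothetical stand-ins, not HBase or HDFS code.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveWrapDemo {
    // Hypothetical stand-in for DistributedFileSystem#isFileClosed: it fails the same way
    // the log's DFSClient does once the filesystem has been closed.
    static class FakeFs {
        public boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed");
        }
    }

    public static void main(String[] args) throws Exception {
        Method m = FakeFs.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(new FakeFs(), "/some/wal");
        } catch (InvocationTargetException e) {
            // Reflection wraps the target's checked exception, so callers see the
            // InvocationTargetException and must unwrap getCause() to reach the IOException.
            System.out.println("cause = " + e.getCause());
        }
    }
}
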
2024-11-17T15:31:53,950 WARN [Thread-1992 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/cluster_fea5238e-7690-69da-f68b-7e85fb014ef5/data/data4/current/BP-171420599-172.17.0.2-1731857513292/current, will proceed with Du for space computation calculation, 2024-11-17T15:31:53,950 WARN [Thread-1991 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/cluster_fea5238e-7690-69da-f68b-7e85fb014ef5/data/data3/current/BP-171420599-172.17.0.2-1731857513292/current, will proceed with Du for space computation calculation, 2024-11-17T15:31:53,966 WARN [Thread-1980 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-17T15:31:53,968 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x73332be5edff996c with lease ID 0xe5ea923abf1a7be6: Processing first storage report for DS-3096f273-d2a6-4479-afc3-88b036ad39bc from datanode DatanodeRegistration(127.0.0.1:45075, datanodeUuid=3fc17354-b591-450a-b632-df2dee3612a6, infoPort=45857, infoSecurePort=0, ipcPort=33357, storageInfo=lv=-57;cid=testClusterID;nsid=521578460;c=1731857513292) 2024-11-17T15:31:53,968 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x73332be5edff996c with lease ID 0xe5ea923abf1a7be6: from storage DS-3096f273-d2a6-4479-afc3-88b036ad39bc node DatanodeRegistration(127.0.0.1:45075, datanodeUuid=3fc17354-b591-450a-b632-df2dee3612a6, infoPort=45857, infoSecurePort=0, ipcPort=33357, storageInfo=lv=-57;cid=testClusterID;nsid=521578460;c=1731857513292), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:31:53,969 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x73332be5edff996c with lease ID 0xe5ea923abf1a7be6: Processing first storage report for DS-6c8bbad8-2908-45b0-9d41-cc04d8cc46df from datanode DatanodeRegistration(127.0.0.1:45075, datanodeUuid=3fc17354-b591-450a-b632-df2dee3612a6, infoPort=45857, infoSecurePort=0, ipcPort=33357, storageInfo=lv=-57;cid=testClusterID;nsid=521578460;c=1731857513292) 2024-11-17T15:31:53,969 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x73332be5edff996c with lease ID 0xe5ea923abf1a7be6: from storage DS-6c8bbad8-2908-45b0-9d41-cc04d8cc46df node DatanodeRegistration(127.0.0.1:45075, datanodeUuid=3fc17354-b591-450a-b632-df2dee3612a6, infoPort=45857, infoSecurePort=0, ipcPort=33357, storageInfo=lv=-57;cid=testClusterID;nsid=521578460;c=1731857513292), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:31:53,988 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342 2024-11-17T15:31:53,992 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/cluster_fea5238e-7690-69da-f68b-7e85fb014ef5/zookeeper_0, clientPort=64124, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/cluster_fea5238e-7690-69da-f68b-7e85fb014ef5/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/cluster_fea5238e-7690-69da-f68b-7e85fb014ef5/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-17T15:31:53,993 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64124 2024-11-17T15:31:53,993 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:31:53,995 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:31:54,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741825_1001 (size=7) 2024-11-17T15:31:54,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741825_1001 (size=7) 2024-11-17T15:31:54,006 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb with version=8 2024-11-17T15:31:54,006 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/hbase-staging 2024-11-17T15:31:54,009 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7a780d55532c:0 server-side Connection retries=45 2024-11-17T15:31:54,009 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T15:31:54,009 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T15:31:54,009 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T15:31:54,009 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T15:31:54,009 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T15:31:54,009 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-17T15:31:54,009 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T15:31:54,010 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39309 2024-11-17T15:31:54,012 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39309 connecting to ZooKeeper ensemble=127.0.0.1:64124 2024-11-17T15:31:54,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:393090x0, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T15:31:54,020 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39309-0x101268e68960000 connected 2024-11-17T15:31:54,039 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:31:54,041 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:31:54,043 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39309-0x101268e68960000, quorum=127.0.0.1:64124, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T15:31:54,043 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb, hbase.cluster.distributed=false 2024-11-17T15:31:54,045 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39309-0x101268e68960000, quorum=127.0.0.1:64124, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T15:31:54,045 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39309 2024-11-17T15:31:54,046 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39309 2024-11-17T15:31:54,048 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39309 2024-11-17T15:31:54,049 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39309 2024-11-17T15:31:54,050 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39309 2024-11-17T15:31:54,066 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7a780d55532c:0 server-side Connection retries=45 2024-11-17T15:31:54,066 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T15:31:54,066 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T15:31:54,066 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T15:31:54,066 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T15:31:54,066 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T15:31:54,066 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-17T15:31:54,066 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T15:31:54,067 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35813 2024-11-17T15:31:54,068 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35813 connecting to ZooKeeper ensemble=127.0.0.1:64124 2024-11-17T15:31:54,069 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:31:54,070 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:31:54,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:358130x0, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T15:31:54,074 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35813-0x101268e68960001, quorum=127.0.0.1:64124, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T15:31:54,074 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35813-0x101268e68960001 connected 2024-11-17T15:31:54,075 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-17T15:31:54,075 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-17T15:31:54,076 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35813-0x101268e68960001, quorum=127.0.0.1:64124, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-17T15:31:54,077 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35813-0x101268e68960001, quorum=127.0.0.1:64124, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T15:31:54,077 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35813 2024-11-17T15:31:54,077 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35813 2024-11-17T15:31:54,077 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35813 2024-11-17T15:31:54,078 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35813 2024-11-17T15:31:54,078 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35813 
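
The ZKUtil lines above repeatedly report "Set watcher on znode that does not yet exist" for paths such as /hbase/running and /hbase/acl. The sketch below uses the plain Apache ZooKeeper client (not HBase's ZKWatcher/ZKUtil) to show the underlying idea: exists() with a watch registers the watcher even when the node is absent, so a later NodeCreated event is still delivered. Only the client port 64124 is taken from the log; the rest is a hedged illustration.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class WatchMissingZNode {
    public static void main(String[] args) throws Exception {
        Watcher watcher = new Watcher() {
            @Override
            public void process(WatchedEvent event) {
                System.out.println("event: " + event.getType() + " path=" + event.getPath());
            }
        };
        ZooKeeper zk = new ZooKeeper("127.0.0.1:64124", 30000, watcher);

        // exists() returns null when the node is missing but still leaves the watch in place,
        // so the watcher fires NodeCreated once something later creates /hbase/running.
        Stat stat = zk.exists("/hbase/running", true);
        System.out.println("/hbase/running present? " + (stat != null));
    }
}
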
2024-11-17T15:31:54,089 DEBUG [M:0;7a780d55532c:39309 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7a780d55532c:39309 2024-11-17T15:31:54,089 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7a780d55532c,39309,1731857514009 2024-11-17T15:31:54,091 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35813-0x101268e68960001, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T15:31:54,091 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39309-0x101268e68960000, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T15:31:54,092 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39309-0x101268e68960000, quorum=127.0.0.1:64124, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7a780d55532c,39309,1731857514009 2024-11-17T15:31:54,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35813-0x101268e68960001, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-17T15:31:54,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39309-0x101268e68960000, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:31:54,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35813-0x101268e68960001, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:31:54,094 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39309-0x101268e68960000, quorum=127.0.0.1:64124, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-17T15:31:54,095 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7a780d55532c,39309,1731857514009 from backup master directory 2024-11-17T15:31:54,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39309-0x101268e68960000, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7a780d55532c,39309,1731857514009 2024-11-17T15:31:54,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35813-0x101268e68960001, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T15:31:54,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39309-0x101268e68960000, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T15:31:54,096 WARN [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-17T15:31:54,096 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7a780d55532c,39309,1731857514009 2024-11-17T15:31:54,100 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/hbase.id] with ID: 9ad297a1-d46f-435d-b825-ea98c34e047c 2024-11-17T15:31:54,100 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/.tmp/hbase.id 2024-11-17T15:31:54,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741826_1002 (size=42) 2024-11-17T15:31:54,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741826_1002 (size=42) 2024-11-17T15:31:54,106 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/.tmp/hbase.id]:[hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/hbase.id] 2024-11-17T15:31:54,115 INFO [master/7a780d55532c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:31:54,115 INFO [master/7a780d55532c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-17T15:31:54,116 INFO [master/7a780d55532c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
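
The FSUtils lines above write the cluster ID to a temporary .tmp/hbase.id file and then move it to its final hbase.id location. A short sketch of that write-to-temp-then-rename pattern with the generic Hadoop FileSystem API follows; it is not HBase's FSUtils, the paths are hypothetical, and only the NameNode port 41751 and the cluster ID value are taken from the log.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WriteIdFileSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:41751"); // NameNode address from the log
        FileSystem fs = FileSystem.get(conf);

        Path tmp = new Path("/user/jenkins/test-data/demo/.tmp/hbase.id"); // hypothetical paths
        Path dst = new Path("/user/jenkins/test-data/demo/hbase.id");

        // Write the content to the temporary location first ...
        try (FSDataOutputStream out = fs.create(tmp, true)) {
            out.write("9ad297a1-d46f-435d-b825-ea98c34e047c".getBytes(StandardCharsets.UTF_8));
        }
        // ... then rename it into place so readers never observe a half-written file.
        if (!fs.rename(tmp, dst)) {
            throw new IOException("rename failed: " + tmp + " -> " + dst);
        }
    }
}
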
2024-11-17T15:31:54,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35813-0x101268e68960001, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:31:54,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39309-0x101268e68960000, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:31:54,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741827_1003 (size=196) 2024-11-17T15:31:54,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741827_1003 (size=196) 2024-11-17T15:31:54,124 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T15:31:54,125 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-17T15:31:54,125 INFO [master/7a780d55532c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T15:31:54,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741828_1004 (size=1189) 2024-11-17T15:31:54,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741828_1004 (size=1189) 2024-11-17T15:31:54,131 INFO [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/MasterData/data/master/store 2024-11-17T15:31:54,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741829_1005 (size=34) 2024-11-17T15:31:54,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741829_1005 (size=34) 2024-11-17T15:31:54,137 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:31:54,137 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T15:31:54,137 INFO [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:31:54,137 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:31:54,137 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T15:31:54,137 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:31:54,137 INFO [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
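
The 'master:store' descriptor dumped above (an 'info' family with VERSIONS => '3', IN_MEMORY => 'true', BLOOMFILTER => 'ROWCOL', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOCKSIZE => 8192, plus 'proc', 'rs' and 'state' families with defaults) can be expressed with HBase's public builder API. The sketch below builds an equivalent-looking descriptor for a hypothetical table name; it illustrates the builder API only and is not the code the master actually runs.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
    public static void main(String[] args) {
        // Mirrors the 'info' family attributes printed in the log.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(8 * 1024)
                .build();
        // The 'proc', 'rs' and 'state' families keep the defaults shown in the log.
        TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("demo", "store")) // hypothetical name, not master:store
                .setColumnFamily(info)
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
                .build();
        System.out.println(td);
    }
}
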
2024-11-17T15:31:54,137 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731857514137Disabling compacts and flushes for region at 1731857514137Disabling writes for close at 1731857514137Writing region close event to WAL at 1731857514137Closed at 1731857514137 2024-11-17T15:31:54,138 WARN [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/MasterData/data/master/store/.initializing 2024-11-17T15:31:54,138 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/MasterData/WALs/7a780d55532c,39309,1731857514009 2024-11-17T15:31:54,140 INFO [master/7a780d55532c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a780d55532c%2C39309%2C1731857514009, suffix=, logDir=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/MasterData/WALs/7a780d55532c,39309,1731857514009, archiveDir=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/MasterData/oldWALs, maxLogs=10 2024-11-17T15:31:54,140 INFO [master/7a780d55532c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C39309%2C1731857514009.1731857514140 2024-11-17T15:31:54,144 INFO [master/7a780d55532c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/MasterData/WALs/7a780d55532c,39309,1731857514009/7a780d55532c%2C39309%2C1731857514009.1731857514140 2024-11-17T15:31:54,145 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41199:41199),(127.0.0.1/127.0.0.1:45857:45857)] 2024-11-17T15:31:54,146 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-17T15:31:54,146 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:31:54,146 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:31:54,146 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:31:54,147 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:31:54,148 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-17T15:31:54,148 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:31:54,148 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:31:54,149 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:31:54,149 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-17T15:31:54,150 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:31:54,150 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T15:31:54,150 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:31:54,151 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-17T15:31:54,151 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:31:54,151 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T15:31:54,151 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:31:54,152 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-17T15:31:54,152 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:31:54,153 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T15:31:54,153 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:31:54,153 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:31:54,153 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:31:54,154 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:31:54,154 DEBUG [master/7a780d55532c:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:31:54,155 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-17T15:31:54,156 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:31:54,157 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T15:31:54,157 INFO [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=817671, jitterRate=0.03972327709197998}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-17T15:31:54,158 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731857514146Initializing all the Stores at 1731857514147 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857514147Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857514147Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857514147Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857514147Cleaning up temporary data from old regions at 1731857514154 (+7 ms)Region opened successfully at 1731857514158 (+4 ms) 2024-11-17T15:31:54,158 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-17T15:31:54,161 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26366432, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7a780d55532c/172.17.0.2:0 2024-11-17T15:31:54,162 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-17T15:31:54,162 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-17T15:31:54,162 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-17T15:31:54,162 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-17T15:31:54,162 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-17T15:31:54,162 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-17T15:31:54,162 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-17T15:31:54,164 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-17T15:31:54,165 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39309-0x101268e68960000, quorum=127.0.0.1:64124, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-17T15:31:54,166 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-17T15:31:54,166 INFO [master/7a780d55532c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-17T15:31:54,167 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39309-0x101268e68960000, quorum=127.0.0.1:64124, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-17T15:31:54,169 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-17T15:31:54,169 INFO [master/7a780d55532c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-17T15:31:54,170 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39309-0x101268e68960000, quorum=127.0.0.1:64124, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-17T15:31:54,171 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-17T15:31:54,172 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39309-0x101268e68960000, quorum=127.0.0.1:64124, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-17T15:31:54,173 DEBUG 
[master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-17T15:31:54,174 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39309-0x101268e68960000, quorum=127.0.0.1:64124, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-17T15:31:54,176 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-17T15:31:54,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39309-0x101268e68960000, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T15:31:54,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39309-0x101268e68960000, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:31:54,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35813-0x101268e68960001, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T15:31:54,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35813-0x101268e68960001, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:31:54,178 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7a780d55532c,39309,1731857514009, sessionid=0x101268e68960000, setting cluster-up flag (Was=false) 2024-11-17T15:31:54,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35813-0x101268e68960001, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:31:54,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39309-0x101268e68960000, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:31:54,186 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-17T15:31:54,187 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7a780d55532c,39309,1731857514009 2024-11-17T15:31:54,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39309-0x101268e68960000, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:31:54,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35813-0x101268e68960001, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:31:54,195 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-17T15:31:54,195 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7a780d55532c,39309,1731857514009 2024-11-17T15:31:54,196 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-17T15:31:54,198 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-17T15:31:54,198 INFO [master/7a780d55532c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-17T15:31:54,198 INFO [master/7a780d55532c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-17T15:31:54,198 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7a780d55532c,39309,1731857514009 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-17T15:31:54,199 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7a780d55532c:0, corePoolSize=5, maxPoolSize=5 2024-11-17T15:31:54,199 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7a780d55532c:0, corePoolSize=5, maxPoolSize=5 2024-11-17T15:31:54,199 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7a780d55532c:0, corePoolSize=5, maxPoolSize=5 2024-11-17T15:31:54,199 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7a780d55532c:0, corePoolSize=5, maxPoolSize=5 2024-11-17T15:31:54,199 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7a780d55532c:0, corePoolSize=10, maxPoolSize=10 2024-11-17T15:31:54,199 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:31:54,199 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7a780d55532c:0, corePoolSize=2, maxPoolSize=2 2024-11-17T15:31:54,199 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7a780d55532c:0, corePoolSize=1, 
maxPoolSize=1 2024-11-17T15:31:54,201 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731857544201 2024-11-17T15:31:54,201 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-17T15:31:54,201 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-17T15:31:54,201 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T15:31:54,201 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-17T15:31:54,201 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-17T15:31:54,201 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-17T15:31:54,201 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-17T15:31:54,201 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-17T15:31:54,201 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
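[Editor's note] The StochasticLoadBalancer line above reports maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800 and maxRunningTime=30000. These values normally come from configuration; a minimal sketch of setting them programmatically follows, assuming the stock stochastic-balancer property names (verify the keys against hbase-default.xml for your HBase version):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Same values the StochasticLoadBalancer logs above; key names are assumed
        // to match the standard balancer properties.
        conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
        conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        conf.setInt("hbase.master.balancer.stochastic.maxRunningTime", 30_000);
        System.out.println(conf.get("hbase.master.balancer.stochastic.maxSteps"));
      }
    }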
2024-11-17T15:31:54,201 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-17T15:31:54,201 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-17T15:31:54,202 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-17T15:31:54,202 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-17T15:31:54,202 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-17T15:31:54,202 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:31:54,202 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-17T15:31:54,202 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.large.0-1731857514202,5,FailOnTimeoutGroup] 2024-11-17T15:31:54,202 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.small.0-1731857514202,5,FailOnTimeoutGroup] 2024-11-17T15:31:54,202 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:54,202 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-17T15:31:54,202 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:54,202 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:54,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741831_1007 (size=1321) 2024-11-17T15:31:54,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741831_1007 (size=1321) 2024-11-17T15:31:54,208 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-17T15:31:54,208 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb 2024-11-17T15:31:54,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741832_1008 (size=32) 2024-11-17T15:31:54,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741832_1008 (size=32) 2024-11-17T15:31:54,214 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:31:54,215 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T15:31:54,216 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T15:31:54,216 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:31:54,216 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:31:54,216 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T15:31:54,217 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T15:31:54,217 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:31:54,218 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:31:54,218 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T15:31:54,219 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T15:31:54,219 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:31:54,219 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:31:54,219 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T15:31:54,220 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T15:31:54,220 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:31:54,221 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:31:54,221 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T15:31:54,221 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/hbase/meta/1588230740 2024-11-17T15:31:54,222 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/hbase/meta/1588230740 2024-11-17T15:31:54,223 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T15:31:54,223 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T15:31:54,223 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-17T15:31:54,224 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T15:31:54,225 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T15:31:54,226 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=828104, jitterRate=0.05298927426338196}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T15:31:54,226 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731857514214Initializing all the Stores at 1731857514214Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857514214Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857514215 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857514215Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857514215Cleaning up temporary data from old regions at 1731857514223 (+8 ms)Region opened successfully at 1731857514226 (+3 ms) 2024-11-17T15:31:54,226 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T15:31:54,226 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T15:31:54,226 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T15:31:54,226 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T15:31:54,226 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T15:31:54,227 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T15:31:54,227 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731857514226Disabling compacts and flushes for region at 
1731857514226Disabling writes for close at 1731857514226Writing region close event to WAL at 1731857514227 (+1 ms)Closed at 1731857514227 2024-11-17T15:31:54,228 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T15:31:54,228 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-17T15:31:54,228 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-17T15:31:54,229 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T15:31:54,230 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-17T15:31:54,280 INFO [RS:0;7a780d55532c:35813 {}] regionserver.HRegionServer(746): ClusterId : 9ad297a1-d46f-435d-b825-ea98c34e047c 2024-11-17T15:31:54,280 DEBUG [RS:0;7a780d55532c:35813 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-17T15:31:54,282 DEBUG [RS:0;7a780d55532c:35813 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-17T15:31:54,282 DEBUG [RS:0;7a780d55532c:35813 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-17T15:31:54,285 DEBUG [RS:0;7a780d55532c:35813 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-17T15:31:54,285 DEBUG [RS:0;7a780d55532c:35813 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@761c0c6a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7a780d55532c/172.17.0.2:0 2024-11-17T15:31:54,297 DEBUG [RS:0;7a780d55532c:35813 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7a780d55532c:35813 2024-11-17T15:31:54,297 INFO [RS:0;7a780d55532c:35813 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-17T15:31:54,297 INFO [RS:0;7a780d55532c:35813 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-17T15:31:54,297 DEBUG [RS:0;7a780d55532c:35813 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-17T15:31:54,298 INFO [RS:0;7a780d55532c:35813 {}] regionserver.HRegionServer(2659): reportForDuty to master=7a780d55532c,39309,1731857514009 with port=35813, startcode=1731857514065 2024-11-17T15:31:54,298 DEBUG [RS:0;7a780d55532c:35813 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-17T15:31:54,299 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41743, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-17T15:31:54,300 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39309 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7a780d55532c,35813,1731857514065 2024-11-17T15:31:54,300 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39309 {}] master.ServerManager(517): Registering regionserver=7a780d55532c,35813,1731857514065 2024-11-17T15:31:54,301 DEBUG [RS:0;7a780d55532c:35813 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb 2024-11-17T15:31:54,301 DEBUG [RS:0;7a780d55532c:35813 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41751 2024-11-17T15:31:54,301 DEBUG [RS:0;7a780d55532c:35813 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-17T15:31:54,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39309-0x101268e68960000, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T15:31:54,303 DEBUG [RS:0;7a780d55532c:35813 {}] zookeeper.ZKUtil(111): regionserver:35813-0x101268e68960001, quorum=127.0.0.1:64124, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7a780d55532c,35813,1731857514065 2024-11-17T15:31:54,303 WARN [RS:0;7a780d55532c:35813 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-17T15:31:54,303 INFO [RS:0;7a780d55532c:35813 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T15:31:54,304 DEBUG [RS:0;7a780d55532c:35813 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/WALs/7a780d55532c,35813,1731857514065 2024-11-17T15:31:54,304 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7a780d55532c,35813,1731857514065] 2024-11-17T15:31:54,307 INFO [RS:0;7a780d55532c:35813 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-17T15:31:54,308 INFO [RS:0;7a780d55532c:35813 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-17T15:31:54,309 INFO [RS:0;7a780d55532c:35813 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-17T15:31:54,309 INFO [RS:0;7a780d55532c:35813 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
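[Editor's note] The MemStoreFlusher and PressureAwareCompactionThroughputController lines above (globalMemStoreLimit=880 M with low-water mark 836 M; compaction throughput bounded to 100 MB/s upper and 50 MB/s lower, retuned every 60 s) are derived from configurable settings. The sketch below shows the corresponding knobs; the global memstore limit is set as a heap fraction rather than an absolute size, and the throughput-controller key names are assumptions to be checked against your HBase version:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreAndThroughputSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // The 880 M / 836 M limits in the log are a fraction of the region server heap;
        // the fraction (default 0.4) is the configurable part.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4F);
        // Compaction throughput bounds and tuning period as logged above
        // (assumed key names for PressureAwareCompactionThroughputController).
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        conf.setInt("hbase.hstore.compaction.throughput.tune.period", 60_000);
        System.out.println(conf.get("hbase.regionserver.global.memstore.size"));
      }
    }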
2024-11-17T15:31:54,309 INFO [RS:0;7a780d55532c:35813 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-17T15:31:54,309 INFO [RS:0;7a780d55532c:35813 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-17T15:31:54,310 INFO [RS:0;7a780d55532c:35813 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:54,310 DEBUG [RS:0;7a780d55532c:35813 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:31:54,310 DEBUG [RS:0;7a780d55532c:35813 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:31:54,310 DEBUG [RS:0;7a780d55532c:35813 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:31:54,310 DEBUG [RS:0;7a780d55532c:35813 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:31:54,310 DEBUG [RS:0;7a780d55532c:35813 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:31:54,310 DEBUG [RS:0;7a780d55532c:35813 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7a780d55532c:0, corePoolSize=2, maxPoolSize=2 2024-11-17T15:31:54,310 DEBUG [RS:0;7a780d55532c:35813 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:31:54,310 DEBUG [RS:0;7a780d55532c:35813 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:31:54,310 DEBUG [RS:0;7a780d55532c:35813 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:31:54,310 DEBUG [RS:0;7a780d55532c:35813 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:31:54,310 DEBUG [RS:0;7a780d55532c:35813 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:31:54,310 DEBUG [RS:0;7a780d55532c:35813 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:31:54,310 DEBUG [RS:0;7a780d55532c:35813 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7a780d55532c:0, corePoolSize=3, maxPoolSize=3 2024-11-17T15:31:54,310 DEBUG [RS:0;7a780d55532c:35813 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0, corePoolSize=3, maxPoolSize=3 2024-11-17T15:31:54,310 INFO [RS:0;7a780d55532c:35813 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
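[Editor's note] Much of the coordination above runs through ZooKeeper znodes under /hbase (the ephemeral region-server registration on /hbase/rs/7a780d55532c,35813,..., the "node does not exist" checks on /hbase/balancer, /hbase/normalizer, /hbase/switch/*, and the /hbase/running marker). These can be inspected with the plain ZooKeeper client; a minimal sketch, assuming the quorum address 127.0.0.1:64124 reported in the log:

    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;

    public class ZnodePeek {
      public static void main(String[] args) throws Exception {
        // Quorum address taken from the log above; adjust for a real cluster.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:64124", 30_000, event -> { });
        try {
          // Ephemeral region-server registrations, e.g. /hbase/rs/7a780d55532c,35813,...
          List<String> servers = zk.getChildren("/hbase/rs", false);
          System.out.println("registered region servers: " + servers);
          // Some znodes (e.g. /hbase/balancer) simply may not exist yet;
          // as the log notes, that is not necessarily an error.
          System.out.println("/hbase/balancer exists: " + (zk.exists("/hbase/balancer", false) != null));
        } finally {
          zk.close();
        }
      }
    }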
2024-11-17T15:31:54,310 INFO [RS:0;7a780d55532c:35813 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:54,311 INFO [RS:0;7a780d55532c:35813 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:54,311 INFO [RS:0;7a780d55532c:35813 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:54,311 INFO [RS:0;7a780d55532c:35813 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:54,311 INFO [RS:0;7a780d55532c:35813 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,35813,1731857514065-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T15:31:54,325 INFO [RS:0;7a780d55532c:35813 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-17T15:31:54,325 INFO [RS:0;7a780d55532c:35813 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,35813,1731857514065-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:54,325 INFO [RS:0;7a780d55532c:35813 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:54,325 INFO [RS:0;7a780d55532c:35813 {}] regionserver.Replication(171): 7a780d55532c,35813,1731857514065 started 2024-11-17T15:31:54,339 INFO [RS:0;7a780d55532c:35813 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:54,339 INFO [RS:0;7a780d55532c:35813 {}] regionserver.HRegionServer(1482): Serving as 7a780d55532c,35813,1731857514065, RpcServer on 7a780d55532c/172.17.0.2:35813, sessionid=0x101268e68960001 2024-11-17T15:31:54,339 DEBUG [RS:0;7a780d55532c:35813 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-17T15:31:54,339 DEBUG [RS:0;7a780d55532c:35813 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7a780d55532c,35813,1731857514065 2024-11-17T15:31:54,339 DEBUG [RS:0;7a780d55532c:35813 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a780d55532c,35813,1731857514065' 2024-11-17T15:31:54,339 DEBUG [RS:0;7a780d55532c:35813 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-17T15:31:54,339 DEBUG [RS:0;7a780d55532c:35813 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-17T15:31:54,340 DEBUG [RS:0;7a780d55532c:35813 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-17T15:31:54,340 DEBUG [RS:0;7a780d55532c:35813 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-17T15:31:54,340 DEBUG [RS:0;7a780d55532c:35813 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7a780d55532c,35813,1731857514065 2024-11-17T15:31:54,340 DEBUG [RS:0;7a780d55532c:35813 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a780d55532c,35813,1731857514065' 2024-11-17T15:31:54,340 DEBUG [RS:0;7a780d55532c:35813 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-17T15:31:54,340 DEBUG 
[RS:0;7a780d55532c:35813 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-17T15:31:54,341 DEBUG [RS:0;7a780d55532c:35813 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-17T15:31:54,341 INFO [RS:0;7a780d55532c:35813 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-17T15:31:54,341 INFO [RS:0;7a780d55532c:35813 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-17T15:31:54,380 WARN [7a780d55532c:39309 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-17T15:31:54,442 INFO [RS:0;7a780d55532c:35813 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a780d55532c%2C35813%2C1731857514065, suffix=, logDir=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/WALs/7a780d55532c,35813,1731857514065, archiveDir=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/oldWALs, maxLogs=32 2024-11-17T15:31:54,443 INFO [RS:0;7a780d55532c:35813 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C35813%2C1731857514065.1731857514443 2024-11-17T15:31:54,448 INFO [RS:0;7a780d55532c:35813 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/WALs/7a780d55532c,35813,1731857514065/7a780d55532c%2C35813%2C1731857514065.1731857514443 2024-11-17T15:31:54,449 DEBUG [RS:0;7a780d55532c:35813 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45857:45857),(127.0.0.1/127.0.0.1:41199:41199)] 2024-11-17T15:31:54,631 DEBUG [7a780d55532c:39309 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-17T15:31:54,631 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7a780d55532c,35813,1731857514065 2024-11-17T15:31:54,633 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7a780d55532c,35813,1731857514065, state=OPENING 2024-11-17T15:31:54,634 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-17T15:31:54,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39309-0x101268e68960000, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:31:54,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35813-0x101268e68960001, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:31:54,636 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T15:31:54,636 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T15:31:54,636 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T15:31:54,636 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7a780d55532c,35813,1731857514065}] 2024-11-17T15:31:54,783 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:54,783 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:54,789 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-17T15:31:54,791 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50357, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-17T15:31:54,794 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-17T15:31:54,794 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T15:31:54,796 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a780d55532c%2C35813%2C1731857514065.meta, suffix=.meta, logDir=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/WALs/7a780d55532c,35813,1731857514065, archiveDir=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/oldWALs, maxLogs=32 2024-11-17T15:31:54,796 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C35813%2C1731857514065.meta.1731857514796.meta 2024-11-17T15:31:54,801 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/WALs/7a780d55532c,35813,1731857514065/7a780d55532c%2C35813%2C1731857514065.meta.1731857514796.meta 2024-11-17T15:31:54,808 DEBUG 
[RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45857:45857),(127.0.0.1/127.0.0.1:41199:41199)] 2024-11-17T15:31:54,813 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-17T15:31:54,813 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-17T15:31:54,813 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-17T15:31:54,813 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-17T15:31:54,813 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-17T15:31:54,813 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:31:54,813 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-17T15:31:54,813 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-17T15:31:54,814 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T15:31:54,815 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T15:31:54,815 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:31:54,816 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:31:54,816 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T15:31:54,816 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T15:31:54,816 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:31:54,817 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:31:54,817 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T15:31:54,817 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T15:31:54,817 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:31:54,818 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:31:54,818 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T15:31:54,818 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T15:31:54,818 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:31:54,819 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:31:54,819 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T15:31:54,819 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/hbase/meta/1588230740 2024-11-17T15:31:54,820 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/hbase/meta/1588230740 2024-11-17T15:31:54,821 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T15:31:54,821 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T15:31:54,822 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
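(Editor's note, sketch only.) The FlushLargeStoresPolicy message above says that, with no hbase.hregion.percolumnfamilyflush.size.lower.bound in the hbase:meta descriptor, the per-family lower bound falls back to region.getMemStoreFlushHeapSize divided by the number of column families. The arithmetic below reproduces the 16.0 M it reports (and the flushSizeLowerBound=16777216 printed shortly after); the 64 MB flush heap size is an assumption inferred from 4 families x 16 MB, not a value stated in the log.

// Sketch of the fallback described in the FlushLargeStoresPolicy message above.
public class FlushLowerBoundSketch {
  public static void main(String[] args) {
    long memstoreFlushHeapSize = 64L * 1024 * 1024; // assumed 64 MB for this test region (inferred, not logged)
    int familyCount = 4;                            // hbase:meta families: info, ns, rep_barrier, table
    long lowerBound = memstoreFlushHeapSize / familyCount;
    System.out.println(lowerBound);                 // 16777216 bytes = 16.0 MB, matching the log
  }
}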
2024-11-17T15:31:54,823 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T15:31:54,824 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=804324, jitterRate=0.022751644253730774}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T15:31:54,824 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-17T15:31:54,824 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731857514813Writing region info on filesystem at 1731857514813Initializing all the Stores at 1731857514814 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857514814Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857514814Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857514814Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857514814Cleaning up temporary data from old regions at 1731857514821 (+7 ms)Running coprocessor post-open hooks at 1731857514824 (+3 ms)Region opened successfully at 1731857514824 2024-11-17T15:31:54,825 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731857514789 2024-11-17T15:31:54,827 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-17T15:31:54,827 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-17T15:31:54,828 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=7a780d55532c,35813,1731857514065 2024-11-17T15:31:54,829 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7a780d55532c,35813,1731857514065, state=OPEN 2024-11-17T15:31:54,837 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35813-0x101268e68960001, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T15:31:54,837 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39309-0x101268e68960000, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T15:31:54,837 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7a780d55532c,35813,1731857514065 2024-11-17T15:31:54,837 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T15:31:54,837 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T15:31:54,840 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-17T15:31:54,840 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7a780d55532c,35813,1731857514065 in 201 msec 2024-11-17T15:31:54,842 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-17T15:31:54,842 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 612 msec 2024-11-17T15:31:54,843 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T15:31:54,843 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-17T15:31:54,844 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T15:31:54,844 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7a780d55532c,35813,1731857514065, seqNum=-1] 2024-11-17T15:31:54,844 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T15:31:54,846 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53797, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T15:31:54,850 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 652 msec 2024-11-17T15:31:54,850 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731857514850, completionTime=-1 2024-11-17T15:31:54,850 INFO 
[master/7a780d55532c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-17T15:31:54,850 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-17T15:31:54,852 INFO [master/7a780d55532c:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-17T15:31:54,852 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731857574852 2024-11-17T15:31:54,852 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731857634852 2024-11-17T15:31:54,852 INFO [master/7a780d55532c:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-17T15:31:54,852 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,39309,1731857514009-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:54,852 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,39309,1731857514009-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:54,852 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,39309,1731857514009-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:54,852 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7a780d55532c:39309, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:54,852 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:54,853 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:54,854 DEBUG [master/7a780d55532c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-17T15:31:54,856 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.760sec 2024-11-17T15:31:54,856 INFO [master/7a780d55532c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-17T15:31:54,856 INFO [master/7a780d55532c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-17T15:31:54,856 INFO [master/7a780d55532c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-17T15:31:54,856 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
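(Editor's note, illustration only.) The ChoreService entries around this point register named maintenance tasks at fixed periods, e.g. BalancerChore every 300000 ms. The sketch below mimics that fixed-period pattern with a plain ScheduledExecutorService; it is not the HBase ChoreService API, and the task body is a hypothetical placeholder.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class PeriodicChoreSketch {
  public static void main(String[] args) {
    ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
    Runnable balancerChore = () -> System.out.println("balancer pass"); // placeholder work
    // period matches the BalancerChore period=300000 ms logged above
    pool.scheduleAtFixedRate(balancerChore, 0, 300_000, TimeUnit.MILLISECONDS);
  }
}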
2024-11-17T15:31:54,856 INFO [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-17T15:31:54,856 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,39309,1731857514009-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T15:31:54,856 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,39309,1731857514009-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-17T15:31:54,858 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-17T15:31:54,858 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-17T15:31:54,858 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,39309,1731857514009-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:31:54,880 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5049ffa8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T15:31:54,880 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7a780d55532c,39309,-1 for getting cluster id 2024-11-17T15:31:54,880 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-17T15:31:54,881 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '9ad297a1-d46f-435d-b825-ea98c34e047c' 2024-11-17T15:31:54,881 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-17T15:31:54,882 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "9ad297a1-d46f-435d-b825-ea98c34e047c" 2024-11-17T15:31:54,882 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64453c1b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T15:31:54,882 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7a780d55532c,39309,-1] 2024-11-17T15:31:54,882 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-17T15:31:54,882 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:31:54,883 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46030, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-17T15:31:54,884 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d9a4037, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T15:31:54,884 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T15:31:54,885 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7a780d55532c,35813,1731857514065, seqNum=-1] 2024-11-17T15:31:54,886 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T15:31:54,886 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50806, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T15:31:54,888 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7a780d55532c,39309,1731857514009 2024-11-17T15:31:54,888 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:31:54,890 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-17T15:31:54,890 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-17T15:31:54,891 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 7a780d55532c,39309,1731857514009 2024-11-17T15:31:54,891 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@54cc079f 2024-11-17T15:31:54,892 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-17T15:31:54,893 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46046, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-17T15:31:54,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39309 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-17T15:31:54,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39309 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
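(Editor's note, sketch only.) The two TableDescriptorChecker warnings above fire because the test submits a descriptor with deliberately tiny MAX_FILESIZE (786432 bytes) and MEMSTORE_FLUSHSIZE (8192 bytes), forcing frequent flushes, rolls, and splits. The test itself goes through HBaseTestingUtil; the client-side sketch below only approximates the descriptor visible in the create request logged just below, using the single 'info' family it shows.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateSmallTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
          .setMaxFileSize(786432L)      // tiny split threshold -> triggers the MAX_FILESIZE warning
          .setMemStoreFlushSize(8192L)  // tiny flush size -> triggers the MEMSTORE_FLUSHSIZE warning
          .build());
    }
  }
}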
2024-11-17T15:31:54,893 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39309 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T15:31:54,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39309 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-17T15:31:54,896 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-17T15:31:54,896 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:31:54,896 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39309 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-17T15:31:54,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39309 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-17T15:31:54,897 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-17T15:31:54,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741835_1011 (size=381) 2024-11-17T15:31:54,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741835_1011 (size=381) 2024-11-17T15:31:54,906 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 72a877004756a59e58089feba8324f4a, NAME => 'TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb 2024-11-17T15:31:54,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741836_1012 (size=64) 2024-11-17T15:31:54,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741836_1012 (size=64) 2024-11-17T15:31:54,913 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:31:54,913 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 72a877004756a59e58089feba8324f4a, disabling compactions & flushes 2024-11-17T15:31:54,913 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a. 2024-11-17T15:31:54,913 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a. 2024-11-17T15:31:54,913 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a. after waiting 0 ms 2024-11-17T15:31:54,913 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a. 2024-11-17T15:31:54,913 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a. 2024-11-17T15:31:54,913 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 72a877004756a59e58089feba8324f4a: Waiting for close lock at 1731857514913Disabling compacts and flushes for region at 1731857514913Disabling writes for close at 1731857514913Writing region close event to WAL at 1731857514913Closed at 1731857514913 2024-11-17T15:31:54,914 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-17T15:31:54,914 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731857514914"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731857514914"}]},"ts":"1731857514914"} 2024-11-17T15:31:54,917 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
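(Editor's note, not part of the test.) The "Added 1 regions to meta" entry above corresponds to the Put of the regioninfo and state qualifiers for region 72a877004756a59e58089feba8324f4a. A minimal client-side way to observe that record, assuming the mini cluster's configuration is reachable, is to list the table's regions through Admin:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

public class ListRegionsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // expected output here: the single region 72a877004756a59e58089feba8324f4a
      for (RegionInfo region : admin.getRegions(TableName.valueOf("TestLogRolling-testLogRolling"))) {
        System.out.println(region.getEncodedName() + " " + region.getRegionNameAsString());
      }
    }
  }
}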
2024-11-17T15:31:54,918 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-17T15:31:54,918 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731857514918"}]},"ts":"1731857514918"} 2024-11-17T15:31:54,920 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-17T15:31:54,920 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=72a877004756a59e58089feba8324f4a, ASSIGN}] 2024-11-17T15:31:54,921 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=72a877004756a59e58089feba8324f4a, ASSIGN 2024-11-17T15:31:54,922 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=72a877004756a59e58089feba8324f4a, ASSIGN; state=OFFLINE, location=7a780d55532c,35813,1731857514065; forceNewPlan=false, retain=false 2024-11-17T15:31:55,073 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=72a877004756a59e58089feba8324f4a, regionState=OPENING, regionLocation=7a780d55532c,35813,1731857514065 2024-11-17T15:31:55,075 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=72a877004756a59e58089feba8324f4a, ASSIGN because future has completed 2024-11-17T15:31:55,076 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 72a877004756a59e58089feba8324f4a, server=7a780d55532c,35813,1731857514065}] 2024-11-17T15:31:55,232 INFO [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a. 
2024-11-17T15:31:55,232 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 72a877004756a59e58089feba8324f4a, NAME => 'TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a.', STARTKEY => '', ENDKEY => ''} 2024-11-17T15:31:55,232 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 72a877004756a59e58089feba8324f4a 2024-11-17T15:31:55,232 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:31:55,232 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 72a877004756a59e58089feba8324f4a 2024-11-17T15:31:55,232 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 72a877004756a59e58089feba8324f4a 2024-11-17T15:31:55,234 INFO [StoreOpener-72a877004756a59e58089feba8324f4a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 72a877004756a59e58089feba8324f4a 2024-11-17T15:31:55,235 INFO [StoreOpener-72a877004756a59e58089feba8324f4a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 72a877004756a59e58089feba8324f4a columnFamilyName info 2024-11-17T15:31:55,235 DEBUG [StoreOpener-72a877004756a59e58089feba8324f4a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:31:55,236 INFO [StoreOpener-72a877004756a59e58089feba8324f4a-1 {}] regionserver.HStore(327): Store=72a877004756a59e58089feba8324f4a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T15:31:55,236 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 72a877004756a59e58089feba8324f4a 2024-11-17T15:31:55,236 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a 2024-11-17T15:31:55,237 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a 2024-11-17T15:31:55,237 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 72a877004756a59e58089feba8324f4a 2024-11-17T15:31:55,237 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 72a877004756a59e58089feba8324f4a 2024-11-17T15:31:55,238 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 72a877004756a59e58089feba8324f4a 2024-11-17T15:31:55,240 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T15:31:55,240 INFO [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 72a877004756a59e58089feba8324f4a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=713597, jitterRate=-0.09261557459831238}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-17T15:31:55,240 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 72a877004756a59e58089feba8324f4a 2024-11-17T15:31:55,241 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 72a877004756a59e58089feba8324f4a: Running coprocessor pre-open hook at 1731857515233Writing region info on filesystem at 1731857515233Initializing all the Stores at 1731857515233Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857515233Cleaning up temporary data from old regions at 1731857515237 (+4 ms)Running coprocessor post-open hooks at 1731857515240 (+3 ms)Region opened successfully at 1731857515241 (+1 ms) 2024-11-17T15:31:55,242 INFO [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a., pid=6, masterSystemTime=1731857515228 2024-11-17T15:31:55,244 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a. 
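(Editor's note, sketch only.) Once the region open above completes and the OPEN state is published to hbase:meta, clients can resolve row keys of TestLogRolling-testLogRolling to the hosting server 7a780d55532c,35813,1731857514065. The sketch below shows that lookup with RegionLocator; it is not code from the test, and the row key is a hypothetical example.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class LocateRegionSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(TableName.valueOf("TestLogRolling-testLogRolling"))) {
      // reload=true bypasses the client-side location cache and reads hbase:meta again
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("row-0"), true);
      System.out.println(loc.getRegion().getEncodedName() + " on " + loc.getServerName());
    }
  }
}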
2024-11-17T15:31:55,244 INFO [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a. 2024-11-17T15:31:55,245 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=72a877004756a59e58089feba8324f4a, regionState=OPEN, openSeqNum=2, regionLocation=7a780d55532c,35813,1731857514065 2024-11-17T15:31:55,247 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 72a877004756a59e58089feba8324f4a, server=7a780d55532c,35813,1731857514065 because future has completed 2024-11-17T15:31:55,250 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-17T15:31:55,250 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 72a877004756a59e58089feba8324f4a, server=7a780d55532c,35813,1731857514065 in 172 msec 2024-11-17T15:31:55,252 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-17T15:31:55,252 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=72a877004756a59e58089feba8324f4a, ASSIGN in 330 msec 2024-11-17T15:31:55,253 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-17T15:31:55,253 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731857515253"}]},"ts":"1731857515253"} 2024-11-17T15:31:55,255 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-17T15:31:55,256 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-17T15:31:55,257 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 363 msec 2024-11-17T15:31:55,784 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:55,784 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:56,785 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:56,785 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:57,785 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:57,785 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:57,806 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:57,806 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:57,806 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:57,806 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:57,807 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:57,807 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:57,807 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:57,807 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:57,819 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:57,820 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:57,820 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:57,820 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:57,820 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:57,820 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:57,823 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:57,823 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:57,823 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:57,825 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:58,329 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-17T15:31:58,330 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:58,330 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:58,330 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:58,330 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:58,330 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:58,330 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:58,331 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:58,331 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:58,347 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:58,347 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:58,348 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:58,348 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:58,348 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:58,348 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:58,351 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:58,352 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:58,352 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:58,354 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:31:58,786 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:58,786 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:31:59,786 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-17T15:31:59,786 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-17T15:32:00,307 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-11-17T15:32:00,307 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling'
2024-11-17T15:32:00,787 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:00,787 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-17T15:32:01,379 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling
2024-11-17T15:32:01,379 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer
2024-11-17T15:32:01,380 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-17T15:32:01,788 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
...
11 more 2024-11-17T15:32:01,788 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:02,788 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:02,788 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:32:03,789 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:03,789 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:04,789 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-17T15:32:04,789 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-17T15:32:04,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39309 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-17T15:32:04,907 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed
2024-11-17T15:32:04,907 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100
2024-11-17T15:32:04,909 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling
2024-11-17T15:32:04,909 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a.
2024-11-17T15:32:04,911 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a., hostname=7a780d55532c,35813,1731857514065, seqNum=2]
2024-11-17T15:32:04,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35813 {}] regionserver.HRegion(8855): Flush requested on 72a877004756a59e58089feba8324f4a
2024-11-17T15:32:04,922 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 72a877004756a59e58089feba8324f4a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-17T15:32:04,938 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/.tmp/info/fc140b36b6e4447ea9b590bad7a06ad8 is 1080, key is row0001/info:/1731857524912/Put/seqid=0
2024-11-17T15:32:04,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741837_1013 (size=12509)
2024-11-17T15:32:04,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741837_1013 (size=12509)
2024-11-17T15:32:04,944 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/.tmp/info/fc140b36b6e4447ea9b590bad7a06ad8
2024-11-17T15:32:04,951 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/.tmp/info/fc140b36b6e4447ea9b590bad7a06ad8 as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/fc140b36b6e4447ea9b590bad7a06ad8
2024-11-17T15:32:04,956 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/fc140b36b6e4447ea9b590bad7a06ad8, entries=7, sequenceid=11, filesize=12.2 K
2024-11-17T15:32:04,957 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=21.02 KB/21520 for 72a877004756a59e58089feba8324f4a in 34ms, sequenceid=11, compaction requested=false
2024-11-17T15:32:04,957 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 72a877004756a59e58089feba8324f4a:
2024-11-17T15:32:04,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35813 {}] regionserver.HRegion(8855): Flush requested on 72a877004756a59e58089feba8324f4a
2024-11-17T15:32:04,957 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 72a877004756a59e58089feba8324f4a 1/1 column families, dataSize=22.07 KB heapSize=23.88 KB
2024-11-17T15:32:04,961 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/.tmp/info/b9f73618fa7347729e29114219b8633e is 1080, key is row0008/info:/1731857524923/Put/seqid=0
2024-11-17T15:32:04,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741838_1014 (size=27607)
2024-11-17T15:32:04,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741838_1014 (size=27607)
2024-11-17T15:32:04,966 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.07 KB at sequenceid=35 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/.tmp/info/b9f73618fa7347729e29114219b8633e
2024-11-17T15:32:04,971 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/.tmp/info/b9f73618fa7347729e29114219b8633e as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/b9f73618fa7347729e29114219b8633e
2024-11-17T15:32:04,975 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/b9f73618fa7347729e29114219b8633e, entries=21, sequenceid=35, filesize=27.0 K
2024-11-17T15:32:04,976 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~22.07 KB/22596, heapSize ~23.86 KB/24432, currentSize=4.20 KB/4304 for 72a877004756a59e58089feba8324f4a in 19ms, sequenceid=35, compaction requested=false
2024-11-17T15:32:04,976 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 72a877004756a59e58089feba8324f4a:
2024-11-17T15:32:04,976 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.2 K, sizeToCheck=16.0 K
2024-11-17T15:32:04,976 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-17T15:32:04,976 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/b9f73618fa7347729e29114219b8633e because midkey is the same as first or last row
2024-11-17T15:32:05,790 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:05,790 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:06,791 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:06,791 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:06,882 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-17T15:32:06,883 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:32:06,883 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:32:06,883 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:32:06,883 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:32:06,884 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:32:06,884 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:32:06,884 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:32:06,885 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:32:06,903 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:32:06,903 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:32:06,903 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:32:06,903 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:32:06,904 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:32:06,904 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:32:06,907 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:32:06,907 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:32:06,907 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:32:06,909 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:32:06,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35813 {}] regionserver.HRegion(8855): Flush requested on 72a877004756a59e58089feba8324f4a 2024-11-17T15:32:06,969 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 72a877004756a59e58089feba8324f4a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-17T15:32:06,974 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/.tmp/info/9ee289565d5c4cf691072a9222bcbd4a is 1080, key is row0029/info:/1731857524958/Put/seqid=0 2024-11-17T15:32:06,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741839_1015 (size=12509) 2024-11-17T15:32:06,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741839_1015 (size=12509) 2024-11-17T15:32:06,980 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/.tmp/info/9ee289565d5c4cf691072a9222bcbd4a 2024-11-17T15:32:06,986 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/.tmp/info/9ee289565d5c4cf691072a9222bcbd4a as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/9ee289565d5c4cf691072a9222bcbd4a 2024-11-17T15:32:06,991 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/9ee289565d5c4cf691072a9222bcbd4a, entries=7, sequenceid=45, filesize=12.2 K 2024-11-17T15:32:06,992 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 72a877004756a59e58089feba8324f4a in 23ms, sequenceid=45, compaction requested=true 2024-11-17T15:32:06,992 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 72a877004756a59e58089feba8324f4a: 2024-11-17T15:32:06,992 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=51.4 K, sizeToCheck=16.0 K 2024-11-17T15:32:06,992 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T15:32:06,992 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/b9f73618fa7347729e29114219b8633e because midkey is the same as first or last row 2024-11-17T15:32:06,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
72a877004756a59e58089feba8324f4a:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T15:32:06,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35813 {}] regionserver.HRegion(8855): Flush requested on 72a877004756a59e58089feba8324f4a 2024-11-17T15:32:06,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T15:32:06,993 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T15:32:06,993 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 72a877004756a59e58089feba8324f4a 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-17T15:32:06,995 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 52625 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T15:32:06,995 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HStore(1541): 72a877004756a59e58089feba8324f4a/info is initiating minor compaction (all files) 2024-11-17T15:32:06,995 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 72a877004756a59e58089feba8324f4a/info in TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a. 2024-11-17T15:32:06,995 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/fc140b36b6e4447ea9b590bad7a06ad8, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/b9f73618fa7347729e29114219b8633e, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/9ee289565d5c4cf691072a9222bcbd4a] into tmpdir=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/.tmp, totalSize=51.4 K 2024-11-17T15:32:06,995 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.Compactor(225): Compacting fc140b36b6e4447ea9b590bad7a06ad8, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731857524912 2024-11-17T15:32:06,996 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.Compactor(225): Compacting b9f73618fa7347729e29114219b8633e, keycount=21, bloomtype=ROW, size=27.0 K, encoding=NONE, compression=NONE, seqNum=35, earliestPutTs=1731857524923 2024-11-17T15:32:06,996 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9ee289565d5c4cf691072a9222bcbd4a, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731857524958 2024-11-17T15:32:06,997 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/.tmp/info/02e144eee9ca441a89cb18ea04d94e43 is 1080, key is row0036/info:/1731857526970/Put/seqid=0 2024-11-17T15:32:07,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741840_1016 (size=20064) 2024-11-17T15:32:07,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741840_1016 (size=20064) 2024-11-17T15:32:07,007 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=62 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/.tmp/info/02e144eee9ca441a89cb18ea04d94e43 2024-11-17T15:32:07,012 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 72a877004756a59e58089feba8324f4a#info#compaction#61 average throughput is 17.96 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T15:32:07,012 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/.tmp/info/4216806c799a42e6a2dd4c656092e4f3 is 1080, key is row0001/info:/1731857524912/Put/seqid=0 2024-11-17T15:32:07,014 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/.tmp/info/02e144eee9ca441a89cb18ea04d94e43 as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/02e144eee9ca441a89cb18ea04d94e43 2024-11-17T15:32:07,020 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/02e144eee9ca441a89cb18ea04d94e43, entries=14, sequenceid=62, filesize=19.6 K 2024-11-17T15:32:07,021 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=12.61 KB/12912 for 72a877004756a59e58089feba8324f4a in 28ms, sequenceid=62, compaction requested=false 2024-11-17T15:32:07,021 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 72a877004756a59e58089feba8324f4a: 2024-11-17T15:32:07,021 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.0 K, sizeToCheck=16.0 K 2024-11-17T15:32:07,021 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T15:32:07,022 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/b9f73618fa7347729e29114219b8633e because midkey is the same as first or last row 2024-11-17T15:32:07,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35813 {}] regionserver.HRegion(8855): Flush requested on 72a877004756a59e58089feba8324f4a 2024-11-17T15:32:07,023 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 72a877004756a59e58089feba8324f4a 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-17T15:32:07,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741841_1017 (size=42824) 2024-11-17T15:32:07,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741841_1017 (size=42824) 2024-11-17T15:32:07,030 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/.tmp/info/c80d667656c34d199a9c2cd964a915f0 is 1080, key is row0050/info:/1731857526994/Put/seqid=0 2024-11-17T15:32:07,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741842_1018 (size=20064) 2024-11-17T15:32:07,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741842_1018 (size=20064) 2024-11-17T15:32:07,037 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/.tmp/info/4216806c799a42e6a2dd4c656092e4f3 as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/4216806c799a42e6a2dd4c656092e4f3 2024-11-17T15:32:07,037 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/.tmp/info/c80d667656c34d199a9c2cd964a915f0 2024-11-17T15:32:07,043 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/.tmp/info/c80d667656c34d199a9c2cd964a915f0 as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/c80d667656c34d199a9c2cd964a915f0 2024-11-17T15:32:07,043 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 72a877004756a59e58089feba8324f4a/info of 72a877004756a59e58089feba8324f4a into 4216806c799a42e6a2dd4c656092e4f3(size=41.8 K), total size for store is 61.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
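The repeated split-policy DEBUG lines above ("Should split because region size is big enough sumSize=..., sizeToCheck=16.0 K" followed by "cannot split ... because midkey is the same as first or last row") come down to one size comparison plus a midkey guard. As a hedged sketch only, not the HBase source: IncreasingToUpperBoundRegionSplitPolicy is commonly described as using min(maxFileSize, 2 * memstoreFlushSize * regionsWithCommonTable^3) as its threshold, which would explain the small 16.0 K value here if this test configures a very small flush size (regionsWithCommonTable=1 in the log).

// Hedged sketch of the size check behind the "Should split ... sumSize vs sizeToCheck" lines.
// Not the actual HBase code; names and the exact formula are assumptions from the usual
// description of IncreasingToUpperBoundRegionSplitPolicy.
final class SplitSizeCheckSketch {
    static long sizeToCheck(long maxFileSize, long memstoreFlushSize, int regionsWithCommonTable) {
        long initialSize = 2L * memstoreFlushSize;
        long cubed = (long) regionsWithCommonTable * regionsWithCommonTable * regionsWithCommonTable;
        return Math.min(maxFileSize, initialSize * cubed);
    }

    // The DEBUG line fires when the summed store size exceeds the threshold; the separate
    // "midkey is the same as first or last row" guard then vetoes a split that would leave
    // one daughter region empty.
    static boolean shouldSplit(long storeSumSizeBytes, long sizeToCheckBytes) {
        return storeSumSizeBytes > sizeToCheckBytes;
    }
}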
2024-11-17T15:32:07,044 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 72a877004756a59e58089feba8324f4a: 2024-11-17T15:32:07,044 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a., storeName=72a877004756a59e58089feba8324f4a/info, priority=13, startTime=1731857526992; duration=0sec 2024-11-17T15:32:07,044 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=61.4 K, sizeToCheck=16.0 K 2024-11-17T15:32:07,044 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T15:32:07,044 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/4216806c799a42e6a2dd4c656092e4f3 because midkey is the same as first or last row 2024-11-17T15:32:07,044 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=61.4 K, sizeToCheck=16.0 K 2024-11-17T15:32:07,044 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T15:32:07,044 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/4216806c799a42e6a2dd4c656092e4f3 because midkey is the same as first or last row 2024-11-17T15:32:07,044 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=61.4 K, sizeToCheck=16.0 K 2024-11-17T15:32:07,044 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T15:32:07,044 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/4216806c799a42e6a2dd4c656092e4f3 because midkey is the same as first or last row 2024-11-17T15:32:07,044 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T15:32:07,044 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 72a877004756a59e58089feba8324f4a:info 2024-11-17T15:32:07,048 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/c80d667656c34d199a9c2cd964a915f0, entries=14, sequenceid=79, filesize=19.6 K 2024-11-17T15:32:07,049 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished 
flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=1.05 KB/1076 for 72a877004756a59e58089feba8324f4a in 26ms, sequenceid=79, compaction requested=true 2024-11-17T15:32:07,049 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 72a877004756a59e58089feba8324f4a: 2024-11-17T15:32:07,049 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=81.0 K, sizeToCheck=16.0 K 2024-11-17T15:32:07,049 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T15:32:07,049 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/4216806c799a42e6a2dd4c656092e4f3 because midkey is the same as first or last row 2024-11-17T15:32:07,049 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 72a877004756a59e58089feba8324f4a:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T15:32:07,049 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T15:32:07,049 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T15:32:07,050 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 82952 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T15:32:07,050 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HStore(1541): 72a877004756a59e58089feba8324f4a/info is initiating minor compaction (all files) 2024-11-17T15:32:07,050 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 72a877004756a59e58089feba8324f4a/info in TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a. 
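The "Exploring compaction algorithm has selected 3 files of size 82952 ... 1 permutations with 1 in ratio" line directly above reflects a ratio test over candidate windows of store files. A rough sketch of that test, under the assumption that it matches the usual description of ExploringCompactionPolicy (a window is "in ratio" when no single file is larger than the configured ratio times the combined size of the other files; the 1.200000 ratio shows up in the CompactionConfiguration line further below):

import java.util.List;

// Illustrative only: the "in ratio" test applied to a candidate window of store files.
// The real policy additionally enforces min/max file counts and total size limits and then
// keeps the best qualifying window among the permutations it explores.
final class CompactionRatioSketch {
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false; // one file dominates the window
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // The three files selected above: 42824 + 20064 + 20064 = 82952 bytes, ratio 1.2.
        System.out.println(filesInRatio(List.of(42824L, 20064L, 20064L), 1.2)); // prints true
    }
}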
2024-11-17T15:32:07,050 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/4216806c799a42e6a2dd4c656092e4f3, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/02e144eee9ca441a89cb18ea04d94e43, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/c80d667656c34d199a9c2cd964a915f0] into tmpdir=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/.tmp, totalSize=81.0 K 2024-11-17T15:32:07,051 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4216806c799a42e6a2dd4c656092e4f3, keycount=35, bloomtype=ROW, size=41.8 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731857524912 2024-11-17T15:32:07,051 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.Compactor(225): Compacting 02e144eee9ca441a89cb18ea04d94e43, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=62, earliestPutTs=1731857526970 2024-11-17T15:32:07,051 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.Compactor(225): Compacting c80d667656c34d199a9c2cd964a915f0, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1731857526994 2024-11-17T15:32:07,062 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 72a877004756a59e58089feba8324f4a#info#compaction#63 average throughput is 21.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T15:32:07,063 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/.tmp/info/a54994361c5941d0bcb17b7c2035107b is 1080, key is row0001/info:/1731857524912/Put/seqid=0 2024-11-17T15:32:07,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741843_1019 (size=73224) 2024-11-17T15:32:07,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741843_1019 (size=73224) 2024-11-17T15:32:07,072 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/.tmp/info/a54994361c5941d0bcb17b7c2035107b as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/a54994361c5941d0bcb17b7c2035107b 2024-11-17T15:32:07,079 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 72a877004756a59e58089feba8324f4a/info of 72a877004756a59e58089feba8324f4a into a54994361c5941d0bcb17b7c2035107b(size=71.5 K), total size for store is 71.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T15:32:07,079 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 72a877004756a59e58089feba8324f4a: 2024-11-17T15:32:07,079 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a., storeName=72a877004756a59e58089feba8324f4a/info, priority=13, startTime=1731857527049; duration=0sec 2024-11-17T15:32:07,079 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.5 K, sizeToCheck=16.0 K 2024-11-17T15:32:07,079 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T15:32:07,079 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.5 K, sizeToCheck=16.0 K 2024-11-17T15:32:07,079 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T15:32:07,079 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.5 K, sizeToCheck=16.0 K 2024-11-17T15:32:07,079 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T15:32:07,081 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a., 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T15:32:07,081 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T15:32:07,081 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 72a877004756a59e58089feba8324f4a:info 2024-11-17T15:32:07,082 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39309 {}] assignment.AssignmentManager(1355): Split request from 7a780d55532c,35813,1731857514065, parent={ENCODED => 72a877004756a59e58089feba8324f4a, NAME => 'TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-17T15:32:07,087 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39309 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=7a780d55532c,35813,1731857514065 2024-11-17T15:32:07,091 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39309 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=72a877004756a59e58089feba8324f4a, daughterA=4a33dad81f9236978607e15daf66bfbd, daughterB=c1a50fd4c077917cdbb56a8a483912c5 2024-11-17T15:32:07,092 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=72a877004756a59e58089feba8324f4a, daughterA=4a33dad81f9236978607e15daf66bfbd, daughterB=c1a50fd4c077917cdbb56a8a483912c5 2024-11-17T15:32:07,092 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=72a877004756a59e58089feba8324f4a, daughterA=4a33dad81f9236978607e15daf66bfbd, daughterB=c1a50fd4c077917cdbb56a8a483912c5 2024-11-17T15:32:07,092 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=72a877004756a59e58089feba8324f4a, daughterA=4a33dad81f9236978607e15daf66bfbd, daughterB=c1a50fd4c077917cdbb56a8a483912c5 2024-11-17T15:32:07,099 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=72a877004756a59e58089feba8324f4a, UNASSIGN}] 2024-11-17T15:32:07,100 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=72a877004756a59e58089feba8324f4a, UNASSIGN 2024-11-17T15:32:07,102 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=72a877004756a59e58089feba8324f4a, regionState=CLOSING, regionLocation=7a780d55532c,35813,1731857514065 2024-11-17T15:32:07,104 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39309 {}] assignment.AssignmentManager(1535): Unable to acquire lock for regionNode state=CLOSING, 
location=7a780d55532c,35813,1731857514065, table=TestLogRolling-testLogRolling, region=72a877004756a59e58089feba8324f4a. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-17T15:32:07,104 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=72a877004756a59e58089feba8324f4a, UNASSIGN because future has completed 2024-11-17T15:32:07,105 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-17T15:32:07,105 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 72a877004756a59e58089feba8324f4a, server=7a780d55532c,35813,1731857514065}] 2024-11-17T15:32:07,263 INFO [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 72a877004756a59e58089feba8324f4a 2024-11-17T15:32:07,263 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-17T15:32:07,264 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 72a877004756a59e58089feba8324f4a, disabling compactions & flushes 2024-11-17T15:32:07,264 INFO [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a. 2024-11-17T15:32:07,264 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a. 2024-11-17T15:32:07,264 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a. after waiting 0 ms 2024-11-17T15:32:07,264 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a. 
2024-11-17T15:32:07,264 INFO [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 72a877004756a59e58089feba8324f4a 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-17T15:32:07,268 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/.tmp/info/dae32ade9b9f4e8ba8e9e5fcf9efd74f is 1080, key is row0064/info:/1731857527024/Put/seqid=0 2024-11-17T15:32:07,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741844_1020 (size=6033) 2024-11-17T15:32:07,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741844_1020 (size=6033) 2024-11-17T15:32:07,273 INFO [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=85 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/.tmp/info/dae32ade9b9f4e8ba8e9e5fcf9efd74f 2024-11-17T15:32:07,278 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/.tmp/info/dae32ade9b9f4e8ba8e9e5fcf9efd74f as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/dae32ade9b9f4e8ba8e9e5fcf9efd74f 2024-11-17T15:32:07,283 INFO [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/dae32ade9b9f4e8ba8e9e5fcf9efd74f, entries=1, sequenceid=85, filesize=5.9 K 2024-11-17T15:32:07,284 INFO [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 72a877004756a59e58089feba8324f4a in 20ms, sequenceid=85, compaction requested=false 2024-11-17T15:32:07,285 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/fc140b36b6e4447ea9b590bad7a06ad8, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/b9f73618fa7347729e29114219b8633e, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/4216806c799a42e6a2dd4c656092e4f3, 
hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/9ee289565d5c4cf691072a9222bcbd4a, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/02e144eee9ca441a89cb18ea04d94e43, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/c80d667656c34d199a9c2cd964a915f0] to archive 2024-11-17T15:32:07,286 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-17T15:32:07,287 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/fc140b36b6e4447ea9b590bad7a06ad8 to hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/archive/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/fc140b36b6e4447ea9b590bad7a06ad8 2024-11-17T15:32:07,288 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/b9f73618fa7347729e29114219b8633e to hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/archive/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/b9f73618fa7347729e29114219b8633e 2024-11-17T15:32:07,289 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/4216806c799a42e6a2dd4c656092e4f3 to hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/archive/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/4216806c799a42e6a2dd4c656092e4f3 2024-11-17T15:32:07,290 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/9ee289565d5c4cf691072a9222bcbd4a to hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/archive/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/9ee289565d5c4cf691072a9222bcbd4a 2024-11-17T15:32:07,291 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/02e144eee9ca441a89cb18ea04d94e43 to 
hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/archive/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/02e144eee9ca441a89cb18ea04d94e43 2024-11-17T15:32:07,292 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/c80d667656c34d199a9c2cd964a915f0 to hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/archive/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/c80d667656c34d199a9c2cd964a915f0 2024-11-17T15:32:07,297 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/recovered.edits/88.seqid, newMaxSeqId=88, maxSeqId=1 2024-11-17T15:32:07,298 INFO [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a. 2024-11-17T15:32:07,298 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 72a877004756a59e58089feba8324f4a: Waiting for close lock at 1731857527264Running coprocessor pre-close hooks at 1731857527264Disabling compacts and flushes for region at 1731857527264Disabling writes for close at 1731857527264Obtaining lock to block concurrent updates at 1731857527264Preparing flush snapshotting stores in 72a877004756a59e58089feba8324f4a at 1731857527264Finished memstore snapshotting TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731857527265 (+1 ms)Flushing stores of TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a. 
at 1731857527265Flushing 72a877004756a59e58089feba8324f4a/info: creating writer at 1731857527265Flushing 72a877004756a59e58089feba8324f4a/info: appending metadata at 1731857527268 (+3 ms)Flushing 72a877004756a59e58089feba8324f4a/info: closing flushed file at 1731857527268Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4ba1782b: reopening flushed file at 1731857527278 (+10 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 72a877004756a59e58089feba8324f4a in 20ms, sequenceid=85, compaction requested=false at 1731857527284 (+6 ms)Writing region close event to WAL at 1731857527294 (+10 ms)Running coprocessor post-close hooks at 1731857527298 (+4 ms)Closed at 1731857527298 2024-11-17T15:32:07,300 INFO [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 72a877004756a59e58089feba8324f4a 2024-11-17T15:32:07,301 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=72a877004756a59e58089feba8324f4a, regionState=CLOSED 2024-11-17T15:32:07,303 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 72a877004756a59e58089feba8324f4a, server=7a780d55532c,35813,1731857514065 because future has completed 2024-11-17T15:32:07,306 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-17T15:32:07,306 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 72a877004756a59e58089feba8324f4a, server=7a780d55532c,35813,1731857514065 in 199 msec 2024-11-17T15:32:07,308 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-17T15:32:07,308 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=72a877004756a59e58089feba8324f4a, UNASSIGN in 207 msec 2024-11-17T15:32:07,315 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:32:07,318 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 2 storefiles, region=72a877004756a59e58089feba8324f4a, threads=2 2024-11-17T15:32:07,320 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/dae32ade9b9f4e8ba8e9e5fcf9efd74f for region: 72a877004756a59e58089feba8324f4a 2024-11-17T15:32:07,320 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/a54994361c5941d0bcb17b7c2035107b for region: 72a877004756a59e58089feba8324f4a 2024-11-17T15:32:07,329 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for 
hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/dae32ade9b9f4e8ba8e9e5fcf9efd74f, top=true 2024-11-17T15:32:07,334 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/TestLogRolling-testLogRolling=72a877004756a59e58089feba8324f4a-dae32ade9b9f4e8ba8e9e5fcf9efd74f for child: c1a50fd4c077917cdbb56a8a483912c5, parent: 72a877004756a59e58089feba8324f4a 2024-11-17T15:32:07,334 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/dae32ade9b9f4e8ba8e9e5fcf9efd74f for region: 72a877004756a59e58089feba8324f4a 2024-11-17T15:32:07,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741845_1021 (size=27) 2024-11-17T15:32:07,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741845_1021 (size=27) 2024-11-17T15:32:07,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741846_1022 (size=27) 2024-11-17T15:32:07,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741846_1022 (size=27) 2024-11-17T15:32:07,352 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/a54994361c5941d0bcb17b7c2035107b for region: 72a877004756a59e58089feba8324f4a 2024-11-17T15:32:07,354 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 72a877004756a59e58089feba8324f4a Daughter A: [hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/4a33dad81f9236978607e15daf66bfbd/info/a54994361c5941d0bcb17b7c2035107b.72a877004756a59e58089feba8324f4a] storefiles, Daughter B: [hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/a54994361c5941d0bcb17b7c2035107b.72a877004756a59e58089feba8324f4a, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/TestLogRolling-testLogRolling=72a877004756a59e58089feba8324f4a-dae32ade9b9f4e8ba8e9e5fcf9efd74f] storefiles. 
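The StoreFileSplitter lines above show why the split itself is cheap: no data is rewritten at split time. The large compacted file (a54994361c...) is referenced from both daughters through half-file references named "<hfile>.<parentEncodedRegion>", while the small flush file (dae32ade...), whose first key row0064 sorts entirely above the split key row0062, gets an HFileLink in the top daughter only (hence top=true). A simplified model of that bookkeeping, assuming only the naming convention visible in the log; this is not the HBase implementation:

import java.util.Arrays;

// Simplified model of split-time store file bookkeeping, based on the names visible in the
// log: half-file references "<hfile>.<parentEncodedRegion>" and an HFileLink for a file that
// lies wholly above the split key. Illustrative only.
final class SplitStoreFileSketch {
    /** Half-file reference name as it appears under the daughter's column family directory. */
    static String referenceName(String hfileName, String parentEncodedRegion) {
        return hfileName + "." + parentEncodedRegion;
    }

    /** A file whose first row key sorts at or after the split key belongs only to the top daughter. */
    static boolean wholeFileInTopDaughter(byte[] firstRowKey, byte[] splitRowKey) {
        return Arrays.compare(firstRowKey, splitRowKey) >= 0;
    }

    public static void main(String[] args) {
        // Matches the log: dae32ade... starts at row0064, split key is row0062 -> top=true.
        System.out.println(wholeFileInTopDaughter("row0064".getBytes(), "row0062".getBytes()));
        System.out.println(referenceName("a54994361c5941d0bcb17b7c2035107b",
            "72a877004756a59e58089feba8324f4a"));
    }
}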
2024-11-17T15:32:07,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741847_1023 (size=71) 2024-11-17T15:32:07,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741847_1023 (size=71) 2024-11-17T15:32:07,364 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:32:07,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741848_1024 (size=71) 2024-11-17T15:32:07,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741848_1024 (size=71) 2024-11-17T15:32:07,375 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:32:07,383 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/4a33dad81f9236978607e15daf66bfbd/recovered.edits/88.seqid, newMaxSeqId=88, maxSeqId=-1 2024-11-17T15:32:07,385 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/recovered.edits/88.seqid, newMaxSeqId=88, maxSeqId=-1 2024-11-17T15:32:07,387 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731857527386"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731857527386"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731857527386"}]},"ts":"1731857527386"} 2024-11-17T15:32:07,387 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731857527087.4a33dad81f9236978607e15daf66bfbd.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731857527386"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731857527386"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731857527386"}]},"ts":"1731857527386"} 2024-11-17T15:32:07,387 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731857527386"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731857527386"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731857527386"}]},"ts":"1731857527386"} 2024-11-17T15:32:07,404 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4a33dad81f9236978607e15daf66bfbd, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=c1a50fd4c077917cdbb56a8a483912c5, ASSIGN}] 2024-11-17T15:32:07,405 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4a33dad81f9236978607e15daf66bfbd, ASSIGN 2024-11-17T15:32:07,405 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c1a50fd4c077917cdbb56a8a483912c5, ASSIGN 2024-11-17T15:32:07,406 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4a33dad81f9236978607e15daf66bfbd, ASSIGN; state=SPLITTING_NEW, location=7a780d55532c,35813,1731857514065; forceNewPlan=false, retain=false 2024-11-17T15:32:07,406 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c1a50fd4c077917cdbb56a8a483912c5, ASSIGN; state=SPLITTING_NEW, location=7a780d55532c,35813,1731857514065; forceNewPlan=false, retain=false 2024-11-17T15:32:07,557 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=c1a50fd4c077917cdbb56a8a483912c5, regionState=OPENING, regionLocation=7a780d55532c,35813,1731857514065 2024-11-17T15:32:07,557 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=4a33dad81f9236978607e15daf66bfbd, regionState=OPENING, regionLocation=7a780d55532c,35813,1731857514065 2024-11-17T15:32:07,559 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4a33dad81f9236978607e15daf66bfbd, ASSIGN because future has completed 2024-11-17T15:32:07,560 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4a33dad81f9236978607e15daf66bfbd, server=7a780d55532c,35813,1731857514065}] 2024-11-17T15:32:07,560 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c1a50fd4c077917cdbb56a8a483912c5, ASSIGN because future has completed 2024-11-17T15:32:07,561 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure c1a50fd4c077917cdbb56a8a483912c5, server=7a780d55532c,35813,1731857514065}] 2024-11-17T15:32:07,715 INFO [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731857527087.4a33dad81f9236978607e15daf66bfbd. 
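[editor's note] Here the master dispatches TransitRegionStateProcedures pid=10/11 (and their OpenRegionProcedure children pid=12/13) to bring the two daughters online on 7a780d55532c,35813. A simple way for a client or test to wait for that assignment is to poll the region list for the table; the count-based wait below is an illustrative sketch, not the procedure framework's own mechanism:

    import java.util.List;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.RegionInfo;

    public class WaitForDaughters {
      // Polls until the table reports at least `expected` regions that are neither
      // offline nor marked as split parents (assignment is asynchronous).
      static void waitForRegionCount(Admin admin, TableName table, int expected) throws Exception {
        while (true) {
          List<RegionInfo> regions = admin.getRegions(table);
          long online = regions.stream().filter(r -> !r.isOffline() && !r.isSplit()).count();
          if (online >= expected) {
            return;
          }
          Thread.sleep(100);
        }
      }
    }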
2024-11-17T15:32:07,715 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 4a33dad81f9236978607e15daf66bfbd, NAME => 'TestLogRolling-testLogRolling,,1731857527087.4a33dad81f9236978607e15daf66bfbd.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-17T15:32:07,715 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 4a33dad81f9236978607e15daf66bfbd 2024-11-17T15:32:07,715 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731857527087.4a33dad81f9236978607e15daf66bfbd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:32:07,715 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 4a33dad81f9236978607e15daf66bfbd 2024-11-17T15:32:07,715 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 4a33dad81f9236978607e15daf66bfbd 2024-11-17T15:32:07,717 INFO [StoreOpener-4a33dad81f9236978607e15daf66bfbd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 4a33dad81f9236978607e15daf66bfbd 2024-11-17T15:32:07,717 INFO [StoreOpener-4a33dad81f9236978607e15daf66bfbd-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4a33dad81f9236978607e15daf66bfbd columnFamilyName info 2024-11-17T15:32:07,717 DEBUG [StoreOpener-4a33dad81f9236978607e15daf66bfbd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:32:07,727 DEBUG [StoreOpener-4a33dad81f9236978607e15daf66bfbd-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/4a33dad81f9236978607e15daf66bfbd/info/a54994361c5941d0bcb17b7c2035107b.72a877004756a59e58089feba8324f4a->hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/a54994361c5941d0bcb17b7c2035107b-bottom 2024-11-17T15:32:07,728 INFO [StoreOpener-4a33dad81f9236978607e15daf66bfbd-1 {}] regionserver.HStore(327): Store=4a33dad81f9236978607e15daf66bfbd/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T15:32:07,728 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 4a33dad81f9236978607e15daf66bfbd 2024-11-17T15:32:07,729 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/4a33dad81f9236978607e15daf66bfbd 2024-11-17T15:32:07,730 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/4a33dad81f9236978607e15daf66bfbd 2024-11-17T15:32:07,730 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 4a33dad81f9236978607e15daf66bfbd 2024-11-17T15:32:07,730 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 4a33dad81f9236978607e15daf66bfbd 2024-11-17T15:32:07,732 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 4a33dad81f9236978607e15daf66bfbd 2024-11-17T15:32:07,732 INFO [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 4a33dad81f9236978607e15daf66bfbd; next sequenceid=89; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=722678, jitterRate=-0.08106750249862671}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-17T15:32:07,733 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4a33dad81f9236978607e15daf66bfbd 2024-11-17T15:32:07,733 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 4a33dad81f9236978607e15daf66bfbd: Running coprocessor pre-open hook at 1731857527716Writing region info on filesystem at 1731857527716Initializing all the Stores at 1731857527716Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857527716Cleaning up temporary data from old regions at 1731857527730 (+14 ms)Running coprocessor post-open hooks at 1731857527733 (+3 ms)Region opened successfully at 1731857527733 2024-11-17T15:32:07,734 INFO [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731857527087.4a33dad81f9236978607e15daf66bfbd., pid=12, masterSystemTime=1731857527712 2024-11-17T15:32:07,734 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 
4a33dad81f9236978607e15daf66bfbd:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T15:32:07,734 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T15:32:07,734 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-17T15:32:07,735 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731857527087.4a33dad81f9236978607e15daf66bfbd. 2024-11-17T15:32:07,735 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HStore(1541): 4a33dad81f9236978607e15daf66bfbd/info is initiating minor compaction (all files) 2024-11-17T15:32:07,735 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4a33dad81f9236978607e15daf66bfbd/info in TestLogRolling-testLogRolling,,1731857527087.4a33dad81f9236978607e15daf66bfbd. 2024-11-17T15:32:07,735 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/4a33dad81f9236978607e15daf66bfbd/info/a54994361c5941d0bcb17b7c2035107b.72a877004756a59e58089feba8324f4a->hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/a54994361c5941d0bcb17b7c2035107b-bottom] into tmpdir=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/4a33dad81f9236978607e15daf66bfbd/.tmp, totalSize=71.5 K 2024-11-17T15:32:07,735 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.Compactor(225): Compacting a54994361c5941d0bcb17b7c2035107b.72a877004756a59e58089feba8324f4a, keycount=31, bloomtype=ROW, size=71.5 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1731857524912 2024-11-17T15:32:07,736 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731857527087.4a33dad81f9236978607e15daf66bfbd. 2024-11-17T15:32:07,736 INFO [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731857527087.4a33dad81f9236978607e15daf66bfbd. 2024-11-17T15:32:07,736 INFO [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5. 
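[editor's note] Immediately after opening daughter 4a33dad81f9236978607e15daf66bfbd, the region server queues a minor compaction of its single reference file (the "-bottom" half of a54994361c5941d0bcb17b7c2035107b) so the daughter stops depending on the parent's files. A compaction can also be requested and observed from a client; a minimal sketch using the Admin API (the polling loop is illustrative):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;

    public class CompactAndWait {
      // Requests a compaction for the table and waits until no compaction is reported.
      static void compactAndWait(Admin admin, TableName table) throws Exception {
        admin.compact(table);                        // asynchronous request
        while (admin.getCompactionState(table) != CompactionState.NONE) {
          Thread.sleep(200);
        }
      }
    }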
2024-11-17T15:32:07,736 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => c1a50fd4c077917cdbb56a8a483912c5, NAME => 'TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-17T15:32:07,737 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling c1a50fd4c077917cdbb56a8a483912c5 2024-11-17T15:32:07,737 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:32:07,737 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for c1a50fd4c077917cdbb56a8a483912c5 2024-11-17T15:32:07,737 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for c1a50fd4c077917cdbb56a8a483912c5 2024-11-17T15:32:07,737 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=4a33dad81f9236978607e15daf66bfbd, regionState=OPEN, openSeqNum=89, regionLocation=7a780d55532c,35813,1731857514065 2024-11-17T15:32:07,738 INFO [StoreOpener-c1a50fd4c077917cdbb56a8a483912c5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region c1a50fd4c077917cdbb56a8a483912c5 2024-11-17T15:32:07,739 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35813 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-17T15:32:07,739 INFO [StoreOpener-c1a50fd4c077917cdbb56a8a483912c5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c1a50fd4c077917cdbb56a8a483912c5 columnFamilyName info 2024-11-17T15:32:07,739 DEBUG [StoreOpener-c1a50fd4c077917cdbb56a8a483912c5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:32:07,739 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
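[editor's note] The MemStoreFlusher entries here flush hbase:meta (encoded region 1588230740) after the split-related edits; FlushAllLargeStoresPolicy flushes every column family because none individually crosses the size threshold. A flush can also be requested explicitly from a client, e.g. (illustrative sketch):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public class FlushMeta {
      static void flushMeta(Admin admin) throws Exception {
        // Flush all column families of hbase:meta.
        admin.flush(TableName.META_TABLE_NAME);
      }
    }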
2024-11-17T15:32:07,739 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-17T15:32:07,739 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4a33dad81f9236978607e15daf66bfbd, server=7a780d55532c,35813,1731857514065 because future has completed 2024-11-17T15:32:07,747 DEBUG [StoreOpener-c1a50fd4c077917cdbb56a8a483912c5-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/TestLogRolling-testLogRolling=72a877004756a59e58089feba8324f4a-dae32ade9b9f4e8ba8e9e5fcf9efd74f 2024-11-17T15:32:07,752 DEBUG [StoreOpener-c1a50fd4c077917cdbb56a8a483912c5-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/a54994361c5941d0bcb17b7c2035107b.72a877004756a59e58089feba8324f4a->hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/a54994361c5941d0bcb17b7c2035107b-top 2024-11-17T15:32:07,752 INFO [StoreOpener-c1a50fd4c077917cdbb56a8a483912c5-1 {}] regionserver.HStore(327): Store=c1a50fd4c077917cdbb56a8a483912c5/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T15:32:07,752 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for c1a50fd4c077917cdbb56a8a483912c5 2024-11-17T15:32:07,753 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5 2024-11-17T15:32:07,754 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5 2024-11-17T15:32:07,754 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for c1a50fd4c077917cdbb56a8a483912c5 2024-11-17T15:32:07,754 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for c1a50fd4c077917cdbb56a8a483912c5 2024-11-17T15:32:07,756 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-11-17T15:32:07,756 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4a33dad81f9236978607e15daf66bfbd#info#compaction#65 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T15:32:07,756 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 4a33dad81f9236978607e15daf66bfbd, server=7a780d55532c,35813,1731857514065 in 194 msec 2024-11-17T15:32:07,756 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for c1a50fd4c077917cdbb56a8a483912c5 2024-11-17T15:32:07,756 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/4a33dad81f9236978607e15daf66bfbd/.tmp/info/582ae27b6be241e580517e8f3c50c8cb is 1080, key is row0001/info:/1731857524912/Put/seqid=0 2024-11-17T15:32:07,757 INFO [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened c1a50fd4c077917cdbb56a8a483912c5; next sequenceid=89; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=742052, jitterRate=-0.056433096528053284}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-17T15:32:07,757 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c1a50fd4c077917cdbb56a8a483912c5 2024-11-17T15:32:07,757 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for c1a50fd4c077917cdbb56a8a483912c5: Running coprocessor pre-open hook at 1731857527737Writing region info on filesystem at 1731857527737Initializing all the Stores at 1731857527738 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857527738Cleaning up temporary data from old regions at 1731857527754 (+16 ms)Running coprocessor post-open hooks at 1731857527757 (+3 ms)Region opened successfully at 1731857527757 2024-11-17T15:32:07,758 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4a33dad81f9236978607e15daf66bfbd, ASSIGN in 352 msec 2024-11-17T15:32:07,759 INFO [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5., pid=13, masterSystemTime=1731857527712 2024-11-17T15:32:07,759 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store c1a50fd4c077917cdbb56a8a483912c5:info, priority=-2147483648, current under compaction store size is 2 2024-11-17T15:32:07,759 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 
2024-11-17T15:32:07,759 DEBUG [RS:0;7a780d55532c:35813-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-17T15:32:07,759 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/hbase/meta/1588230740/.tmp/info/6f817f5ea13c4316bddab768f976c38d is 193, key is TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5./info:regioninfo/1731857527557/Put/seqid=0 2024-11-17T15:32:07,760 INFO [RS:0;7a780d55532c:35813-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5. 2024-11-17T15:32:07,760 DEBUG [RS:0;7a780d55532c:35813-longCompactions-0 {}] regionserver.HStore(1541): c1a50fd4c077917cdbb56a8a483912c5/info is initiating minor compaction (all files) 2024-11-17T15:32:07,760 INFO [RS:0;7a780d55532c:35813-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c1a50fd4c077917cdbb56a8a483912c5/info in TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5. 2024-11-17T15:32:07,761 INFO [RS:0;7a780d55532c:35813-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/a54994361c5941d0bcb17b7c2035107b.72a877004756a59e58089feba8324f4a->hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/a54994361c5941d0bcb17b7c2035107b-top, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/TestLogRolling-testLogRolling=72a877004756a59e58089feba8324f4a-dae32ade9b9f4e8ba8e9e5fcf9efd74f] into tmpdir=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp, totalSize=77.4 K 2024-11-17T15:32:07,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741849_1025 (size=70862) 2024-11-17T15:32:07,761 DEBUG [RS:0;7a780d55532c:35813-longCompactions-0 {}] compactions.Compactor(225): Compacting a54994361c5941d0bcb17b7c2035107b.72a877004756a59e58089feba8324f4a, keycount=31, bloomtype=ROW, size=71.5 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1731857524912 2024-11-17T15:32:07,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741849_1025 (size=70862) 2024-11-17T15:32:07,762 DEBUG [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5. 2024-11-17T15:32:07,762 INFO [RS_OPEN_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5. 
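[editor's note] With both daughters open, rows at or beyond row0062 belong to c1a50fd4c077917cdbb56a8a483912c5 and earlier rows to 4a33dad81f9236978607e15daf66bfbd; a client still holding the parent's cached location first sees the NotServingRegionException logged further down (15:32:09) and then re-reads the location from hbase:meta. An illustrative put against the test table (the column qualifier and value are made up for the example; the "info" family is the one used by the test):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutAfterSplit {
      static void put(Connection conn) throws Exception {
        TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
        try (Table t = conn.getTable(table)) {
          // Row >= row0062, so this is routed to daughter c1a50fd4c077917cdbb56a8a483912c5;
          // a stale cached location is retried transparently after NotServingRegionException.
          t.put(new Put(Bytes.toBytes("row0100"))
              .addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("v")));
        }
      }
    }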
2024-11-17T15:32:07,762 DEBUG [RS:0;7a780d55532c:35813-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=72a877004756a59e58089feba8324f4a-dae32ade9b9f4e8ba8e9e5fcf9efd74f, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1731857527024 2024-11-17T15:32:07,763 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=c1a50fd4c077917cdbb56a8a483912c5, regionState=OPEN, openSeqNum=89, regionLocation=7a780d55532c,35813,1731857514065 2024-11-17T15:32:07,766 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure c1a50fd4c077917cdbb56a8a483912c5, server=7a780d55532c,35813,1731857514065 because future has completed 2024-11-17T15:32:07,769 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/4a33dad81f9236978607e15daf66bfbd/.tmp/info/582ae27b6be241e580517e8f3c50c8cb as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/4a33dad81f9236978607e15daf66bfbd/info/582ae27b6be241e580517e8f3c50c8cb 2024-11-17T15:32:07,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741850_1026 (size=9847) 2024-11-17T15:32:07,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741850_1026 (size=9847) 2024-11-17T15:32:07,772 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-17T15:32:07,772 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure c1a50fd4c077917cdbb56a8a483912c5, server=7a780d55532c,35813,1731857514065 in 208 msec 2024-11-17T15:32:07,772 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/hbase/meta/1588230740/.tmp/info/6f817f5ea13c4316bddab768f976c38d 2024-11-17T15:32:07,775 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-17T15:32:07,775 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c1a50fd4c077917cdbb56a8a483912c5, ASSIGN in 368 msec 2024-11-17T15:32:07,775 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 4a33dad81f9236978607e15daf66bfbd/info of 4a33dad81f9236978607e15daf66bfbd into 582ae27b6be241e580517e8f3c50c8cb(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
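[editor's note] The compaction completing here rewrites daughter A's reference file into a single real store file (582ae27b6be241e580517e8f3c50c8cb, 69.2 K). One way to observe per-region store-file counts and sizes from a client is through region metrics; the sketch below assumes the ClusterMetrics/RegionMetrics accessors of the 2.x+ Admin API and is illustrative only:

    import org.apache.hadoop.hbase.RegionMetrics;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public class StoreFileReport {
      static void report(Admin admin, TableName table) throws Exception {
        for (ServerName sn : admin.getClusterMetrics().getLiveServerMetrics().keySet()) {
          for (RegionMetrics rm : admin.getRegionMetrics(sn, table)) {
            System.out.println(rm.getNameAsString()
                + " storeFiles=" + rm.getStoreFileCount()
                + " storeFileSize=" + rm.getStoreFileSize());
          }
        }
      }
    }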
2024-11-17T15:32:07,775 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4a33dad81f9236978607e15daf66bfbd: 2024-11-17T15:32:07,776 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731857527087.4a33dad81f9236978607e15daf66bfbd., storeName=4a33dad81f9236978607e15daf66bfbd/info, priority=15, startTime=1731857527734; duration=0sec 2024-11-17T15:32:07,776 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T15:32:07,776 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4a33dad81f9236978607e15daf66bfbd:info 2024-11-17T15:32:07,777 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=72a877004756a59e58089feba8324f4a, daughterA=4a33dad81f9236978607e15daf66bfbd, daughterB=c1a50fd4c077917cdbb56a8a483912c5 in 687 msec 2024-11-17T15:32:07,788 INFO [RS:0;7a780d55532c:35813-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a50fd4c077917cdbb56a8a483912c5#info#compaction#67 average throughput is 3.08 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T15:32:07,788 DEBUG [RS:0;7a780d55532c:35813-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/8b4eb685df914825b32457ff86be01e6 is 1080, key is row0062/info:/1731857527020/Put/seqid=0 2024-11-17T15:32:07,790 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/hbase/meta/1588230740/.tmp/ns/71548b7326bd4458bec482acdffd09be is 43, key is default/ns:d/1731857514846/Put/seqid=0 2024-11-17T15:32:07,791 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:07,791 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:32:07,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741851_1027 (size=8359) 2024-11-17T15:32:07,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741851_1027 (size=8359) 2024-11-17T15:32:07,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741852_1028 (size=5153) 2024-11-17T15:32:07,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741852_1028 (size=5153) 2024-11-17T15:32:07,798 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/hbase/meta/1588230740/.tmp/ns/71548b7326bd4458bec482acdffd09be 2024-11-17T15:32:07,799 DEBUG [RS:0;7a780d55532c:35813-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/8b4eb685df914825b32457ff86be01e6 as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/8b4eb685df914825b32457ff86be01e6 2024-11-17T15:32:07,805 INFO [RS:0;7a780d55532c:35813-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 2 (all) file(s) in c1a50fd4c077917cdbb56a8a483912c5/info of c1a50fd4c077917cdbb56a8a483912c5 into 8b4eb685df914825b32457ff86be01e6(size=8.2 K), total size for store is 8.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
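[editor's note] The RegionStateStore puts logged earlier write the parent's info:splitA/info:splitB cells and the daughters' info:regioninfo, info:state and info:seqnumDuringOpen cells into hbase:meta; after this flush they live in the files committed below. A hedged sketch of inspecting those rows with a plain client scan (the start-row prefix is simply the table-name prefix used by meta row keys; this is not how the test itself reads meta):

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaScan {
      static void dump(Connection conn) throws Exception {
        try (Table meta = conn.getTable(TableName.META_TABLE_NAME);
             ResultScanner scanner = meta.getScanner(
                 new Scan().withStartRow(Bytes.toBytes("TestLogRolling-testLogRolling,"))
                           .addFamily(Bytes.toBytes("info")))) {
          for (Result r : scanner) {
            String row = Bytes.toStringBinary(r.getRow());
            if (!row.startsWith("TestLogRolling-testLogRolling,")) {
              break;  // past this table's rows in meta
            }
            for (Cell c : r.rawCells()) {
              System.out.println(row + " info:" + Bytes.toString(CellUtil.cloneQualifier(c)));
            }
          }
        }
      }
    }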
2024-11-17T15:32:07,805 DEBUG [RS:0;7a780d55532c:35813-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c1a50fd4c077917cdbb56a8a483912c5: 2024-11-17T15:32:07,805 INFO [RS:0;7a780d55532c:35813-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5., storeName=c1a50fd4c077917cdbb56a8a483912c5/info, priority=14, startTime=1731857527759; duration=0sec 2024-11-17T15:32:07,805 DEBUG [RS:0;7a780d55532c:35813-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T15:32:07,805 DEBUG [RS:0;7a780d55532c:35813-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a50fd4c077917cdbb56a8a483912c5:info 2024-11-17T15:32:07,821 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/hbase/meta/1588230740/.tmp/table/5aa40e25dc6649fab548bb8ead24b6f3 is 65, key is TestLogRolling-testLogRolling/table:state/1731857515253/Put/seqid=0 2024-11-17T15:32:07,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741853_1029 (size=5340) 2024-11-17T15:32:07,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741853_1029 (size=5340) 2024-11-17T15:32:07,827 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/hbase/meta/1588230740/.tmp/table/5aa40e25dc6649fab548bb8ead24b6f3 2024-11-17T15:32:07,832 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/hbase/meta/1588230740/.tmp/info/6f817f5ea13c4316bddab768f976c38d as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/hbase/meta/1588230740/info/6f817f5ea13c4316bddab768f976c38d 2024-11-17T15:32:07,838 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/hbase/meta/1588230740/info/6f817f5ea13c4316bddab768f976c38d, entries=30, sequenceid=17, filesize=9.6 K 2024-11-17T15:32:07,839 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/hbase/meta/1588230740/.tmp/ns/71548b7326bd4458bec482acdffd09be as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/hbase/meta/1588230740/ns/71548b7326bd4458bec482acdffd09be 2024-11-17T15:32:07,843 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/hbase/meta/1588230740/ns/71548b7326bd4458bec482acdffd09be, entries=2, sequenceid=17, filesize=5.0 K 2024-11-17T15:32:07,844 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/hbase/meta/1588230740/.tmp/table/5aa40e25dc6649fab548bb8ead24b6f3 as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/hbase/meta/1588230740/table/5aa40e25dc6649fab548bb8ead24b6f3 2024-11-17T15:32:07,849 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/hbase/meta/1588230740/table/5aa40e25dc6649fab548bb8ead24b6f3, entries=2, sequenceid=17, filesize=5.2 K 2024-11-17T15:32:07,850 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 111ms, sequenceid=17, compaction requested=false 2024-11-17T15:32:07,850 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-17T15:32:08,792 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:32:08,792 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:09,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35813 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:50806 deadline: 1731857539026, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a. is not online on 7a780d55532c,35813,1731857514065 2024-11-17T15:32:09,050 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a., hostname=7a780d55532c,35813,1731857514065, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a., hostname=7a780d55532c,35813,1731857514065, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a. 
is not online on 7a780d55532c,35813,1731857514065 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-17T15:32:09,051 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a., hostname=7a780d55532c,35813,1731857514065, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a. is not online on 7a780d55532c,35813,1731857514065 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-17T15:32:09,051 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731857514893.72a877004756a59e58089feba8324f4a., hostname=7a780d55532c,35813,1731857514065, seqNum=2 from cache 2024-11-17T15:32:09,792 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:09,792 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:32:10,793 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:10,793 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:11,793 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:32:11,793 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:12,794 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:12,794 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-17T15:32:12,800 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-17T15:32:12,801 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-17T15:32:12,801 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-17T15:32:12,801 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-17T15:32:12,802 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-17T15:32:12,802 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-17T15:32:12,802 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-17T15:32:12,802 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-17T15:32:12,802 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-17T15:32:12,821 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-17T15:32:12,821 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-17T15:32:12,822 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-17T15:32:12,822 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-17T15:32:12,822 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-17T15:32:12,823 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-17T15:32:12,825 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-17T15:32:12,826 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-17T15:32:12,826 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:32:12,828 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T15:32:13,795 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:13,795 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:14,795 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:14,795 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:15,796 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:15,796 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:16,796 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:16,796 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:17,797 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:17,797 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:18,798 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:18,798 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-17T15:32:19,121 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0065', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5., hostname=7a780d55532c,35813,1731857514065, seqNum=89]
2024-11-17T15:32:19,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35813 {}] regionserver.HRegion(8855): Flush requested on c1a50fd4c077917cdbb56a8a483912c5
2024-11-17T15:32:19,133 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c1a50fd4c077917cdbb56a8a483912c5 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-17T15:32:19,138 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/1cd3e0becce84685bb68f2c09f5d41da is 1080, key is row0065/info:/1731857539122/Put/seqid=0
2024-11-17T15:32:19,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741854_1030 (size=12509)
2024-11-17T15:32:19,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741854_1030 (size=12509)
2024-11-17T15:32:19,164 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35813 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=c1a50fd4c077917cdbb56a8a483912c5, server=7a780d55532c,35813,1731857514065
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-11-17T15:32:19,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35813 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:50806 deadline: 1731857549164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=c1a50fd4c077917cdbb56a8a483912c5, server=7a780d55532c,35813,1731857514065
2024-11-17T15:32:19,165 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5., hostname=7a780d55532c,35813,1731857514065, seqNum=89 , the old value is region=TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5., hostname=7a780d55532c,35813,1731857514065, seqNum=89, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=c1a50fd4c077917cdbb56a8a483912c5, server=7a780d55532c,35813,1731857514065
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-17T15:32:19,165 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5., hostname=7a780d55532c,35813,1731857514065, seqNum=89 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=c1a50fd4c077917cdbb56a8a483912c5, server=7a780d55532c,35813,1731857514065
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-17T15:32:19,165 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5., hostname=7a780d55532c,35813,1731857514065, seqNum=89 because the exception is null or not the one we care about
2024-11-17T15:32:19,544 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=99 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/1cd3e0becce84685bb68f2c09f5d41da
2024-11-17T15:32:19,551 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/1cd3e0becce84685bb68f2c09f5d41da as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/1cd3e0becce84685bb68f2c09f5d41da
2024-11-17T15:32:19,556 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/1cd3e0becce84685bb68f2c09f5d41da, entries=7, sequenceid=99, filesize=12.2 K
2024-11-17T15:32:19,557 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for c1a50fd4c077917cdbb56a8a483912c5 in 424ms, sequenceid=99, compaction requested=false
2024-11-17T15:32:19,557 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c1a50fd4c077917cdbb56a8a483912c5:
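The records above show the server side of this episode: a put on 'TestLogRolling-testLogRolling' triggers a flush request, the RPC handler rejects further writes with RegionTooBusyException (Over memstore limit=32.0 K) while the flush is in progress, and the flush then completes at sequenceid=99. The async client used by the test retries the mutation internally, but a caller on the blocking client could apply the same back-off idea itself. The Java sketch below is only an illustration of that pattern, not code from the test: the column qualifier, value, attempt count and back-off schedule are invented, and because the exception may surface directly or as the cause of another IOException depending on client retry settings, the sketch walks the cause chain.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {

  // Returns true if the failure was caused by a region rejecting writes over its memstore limit.
  static boolean causedByBusyRegion(Throwable t) {
    for (Throwable cur = t; cur != null; cur = cur.getCause()) {
      if (cur instanceof RegionTooBusyException) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Table, family and row echo the log; qualifier and value are placeholders.
    TableName name = TableName.valueOf("TestLogRolling-testLogRolling");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(name)) {
      Put put = new Put(Bytes.toBytes("row0065"))
          .addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("v"));

      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);   // the server may reject this while the memstore is flushing
          break;            // write accepted
        } catch (IOException e) {
          if (!causedByBusyRegion(e) || attempt >= 5) {
            throw e;        // not a busy-region failure, or out of attempts
          }
          Thread.sleep(backoffMs);  // let the in-progress flush drain, then retry
          backoffMs *= 2;
        }
      }
    }
  }
}
```

With default settings the HBase client already retries this kind of failure on its own, so an explicit loop like this mainly matters when client retries have been reduced or disabled.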
2024-11-17T15:32:19,798 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-17T15:32:19,798 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
...
11 more 2024-11-17T15:32:20,799 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:20,799 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:21,799 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:32:21,799 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:22,800 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:22,800 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:32:23,801 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:23,801 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:23,988 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-17T15:32:24,801 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:24,801 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:25,802 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:25,802 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:26,802 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:26,802 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:27,803 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:27,803 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:28,803 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:28,803 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:29,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35813 {}] regionserver.HRegion(8855): Flush requested on c1a50fd4c077917cdbb56a8a483912c5 2024-11-17T15:32:29,217 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c1a50fd4c077917cdbb56a8a483912c5 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-17T15:32:29,222 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/f0591d7295d143efb78d1aa10490cbe1 is 1080, key is row0072/info:/1731857539134/Put/seqid=0 2024-11-17T15:32:29,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741855_1031 (size=29761) 2024-11-17T15:32:29,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741855_1031 (size=29761) 2024-11-17T15:32:29,227 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/f0591d7295d143efb78d1aa10490cbe1 2024-11-17T15:32:29,232 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/f0591d7295d143efb78d1aa10490cbe1 as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/f0591d7295d143efb78d1aa10490cbe1 2024-11-17T15:32:29,237 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/f0591d7295d143efb78d1aa10490cbe1, entries=23, sequenceid=125, filesize=29.1 K 2024-11-17T15:32:29,238 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for c1a50fd4c077917cdbb56a8a483912c5 in 21ms, sequenceid=125, compaction requested=true 2024-11-17T15:32:29,238 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c1a50fd4c077917cdbb56a8a483912c5: 2024-11-17T15:32:29,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a50fd4c077917cdbb56a8a483912c5:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T15:32:29,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: 
MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T15:32:29,238 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T15:32:29,239 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 50629 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T15:32:29,239 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HStore(1541): c1a50fd4c077917cdbb56a8a483912c5/info is initiating minor compaction (all files) 2024-11-17T15:32:29,239 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c1a50fd4c077917cdbb56a8a483912c5/info in TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5. 2024-11-17T15:32:29,239 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/8b4eb685df914825b32457ff86be01e6, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/1cd3e0becce84685bb68f2c09f5d41da, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/f0591d7295d143efb78d1aa10490cbe1] into tmpdir=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp, totalSize=49.4 K 2024-11-17T15:32:29,240 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8b4eb685df914825b32457ff86be01e6, keycount=3, bloomtype=ROW, size=8.2 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1731857527020 2024-11-17T15:32:29,240 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1cd3e0becce84685bb68f2c09f5d41da, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1731857539122 2024-11-17T15:32:29,240 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.Compactor(225): Compacting f0591d7295d143efb78d1aa10490cbe1, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=125, earliestPutTs=1731857539134 2024-11-17T15:32:29,249 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a50fd4c077917cdbb56a8a483912c5#info#compaction#72 average throughput is 33.86 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T15:32:29,249 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/9c295373a667476d9cc9217fe20b1bd4 is 1080, key is row0062/info:/1731857527020/Put/seqid=0 2024-11-17T15:32:29,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741856_1032 (size=40835) 2024-11-17T15:32:29,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741856_1032 (size=40835) 2024-11-17T15:32:29,259 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/9c295373a667476d9cc9217fe20b1bd4 as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/9c295373a667476d9cc9217fe20b1bd4 2024-11-17T15:32:29,263 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c1a50fd4c077917cdbb56a8a483912c5/info of c1a50fd4c077917cdbb56a8a483912c5 into 9c295373a667476d9cc9217fe20b1bd4(size=39.9 K), total size for store is 39.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T15:32:29,264 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c1a50fd4c077917cdbb56a8a483912c5: 2024-11-17T15:32:29,264 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5., storeName=c1a50fd4c077917cdbb56a8a483912c5/info, priority=13, startTime=1731857549238; duration=0sec 2024-11-17T15:32:29,264 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T15:32:29,264 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a50fd4c077917cdbb56a8a483912c5:info 2024-11-17T15:32:29,804 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:29,804 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:30,805 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:30,805 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:31,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35813 {}] regionserver.HRegion(8855): Flush requested on c1a50fd4c077917cdbb56a8a483912c5 2024-11-17T15:32:31,230 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c1a50fd4c077917cdbb56a8a483912c5 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-17T15:32:31,234 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/665d183b190342359598e1173014f8dc is 1080, key is row0095/info:/1731857549219/Put/seqid=0 2024-11-17T15:32:31,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741857_1033 (size=12515) 2024-11-17T15:32:31,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741857_1033 (size=12515) 2024-11-17T15:32:31,243 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=136 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/665d183b190342359598e1173014f8dc 2024-11-17T15:32:31,248 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/665d183b190342359598e1173014f8dc as 
hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/665d183b190342359598e1173014f8dc 2024-11-17T15:32:31,254 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/665d183b190342359598e1173014f8dc, entries=7, sequenceid=136, filesize=12.2 K 2024-11-17T15:32:31,255 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=15.76 KB/16140 for c1a50fd4c077917cdbb56a8a483912c5 in 25ms, sequenceid=136, compaction requested=false 2024-11-17T15:32:31,255 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c1a50fd4c077917cdbb56a8a483912c5: 2024-11-17T15:32:31,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35813 {}] regionserver.HRegion(8855): Flush requested on c1a50fd4c077917cdbb56a8a483912c5 2024-11-17T15:32:31,257 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c1a50fd4c077917cdbb56a8a483912c5 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-17T15:32:31,261 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/016773c0f2fc48e680bedde6713c7a44 is 1080, key is row0102/info:/1731857551231/Put/seqid=0 2024-11-17T15:32:31,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741858_1034 (size=22238) 2024-11-17T15:32:31,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741858_1034 (size=22238) 2024-11-17T15:32:31,266 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/016773c0f2fc48e680bedde6713c7a44 2024-11-17T15:32:31,272 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/016773c0f2fc48e680bedde6713c7a44 as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/016773c0f2fc48e680bedde6713c7a44 2024-11-17T15:32:31,276 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/016773c0f2fc48e680bedde6713c7a44, entries=16, sequenceid=155, filesize=21.7 K 2024-11-17T15:32:31,277 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=11.56 KB/11836 for c1a50fd4c077917cdbb56a8a483912c5 in 20ms, sequenceid=155, compaction requested=true 2024-11-17T15:32:31,277 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for c1a50fd4c077917cdbb56a8a483912c5: 2024-11-17T15:32:31,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a50fd4c077917cdbb56a8a483912c5:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T15:32:31,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T15:32:31,277 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T15:32:31,278 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 75588 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T15:32:31,278 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HStore(1541): c1a50fd4c077917cdbb56a8a483912c5/info is initiating minor compaction (all files) 2024-11-17T15:32:31,278 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c1a50fd4c077917cdbb56a8a483912c5/info in TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5. 2024-11-17T15:32:31,279 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/9c295373a667476d9cc9217fe20b1bd4, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/665d183b190342359598e1173014f8dc, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/016773c0f2fc48e680bedde6713c7a44] into tmpdir=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp, totalSize=73.8 K 2024-11-17T15:32:31,279 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9c295373a667476d9cc9217fe20b1bd4, keycount=33, bloomtype=ROW, size=39.9 K, encoding=NONE, compression=NONE, seqNum=125, earliestPutTs=1731857527020 2024-11-17T15:32:31,279 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.Compactor(225): Compacting 665d183b190342359598e1173014f8dc, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1731857549219 2024-11-17T15:32:31,279 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.Compactor(225): Compacting 016773c0f2fc48e680bedde6713c7a44, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1731857551231 2024-11-17T15:32:31,288 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a50fd4c077917cdbb56a8a483912c5#info#compaction#75 average throughput is 57.46 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T15:32:31,288 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/878ea5e89f4e4b12a58ad0637cfba07f is 1080, key is row0062/info:/1731857527020/Put/seqid=0 2024-11-17T15:32:31,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741859_1035 (size=65798) 2024-11-17T15:32:31,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741859_1035 (size=65798) 2024-11-17T15:32:31,298 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/878ea5e89f4e4b12a58ad0637cfba07f as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/878ea5e89f4e4b12a58ad0637cfba07f 2024-11-17T15:32:31,303 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c1a50fd4c077917cdbb56a8a483912c5/info of c1a50fd4c077917cdbb56a8a483912c5 into 878ea5e89f4e4b12a58ad0637cfba07f(size=64.3 K), total size for store is 64.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T15:32:31,303 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c1a50fd4c077917cdbb56a8a483912c5: 2024-11-17T15:32:31,303 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5., storeName=c1a50fd4c077917cdbb56a8a483912c5/info, priority=13, startTime=1731857551277; duration=0sec 2024-11-17T15:32:31,303 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T15:32:31,303 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a50fd4c077917cdbb56a8a483912c5:info 2024-11-17T15:32:31,805 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:31,805 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:32,806 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:32,806 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:33,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35813 {}] regionserver.HRegion(8855): Flush requested on c1a50fd4c077917cdbb56a8a483912c5 2024-11-17T15:32:33,276 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c1a50fd4c077917cdbb56a8a483912c5 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-17T15:32:33,280 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/eba922b28154423db1971327f84fda43 is 1080, key is row0118/info:/1731857551258/Put/seqid=0 2024-11-17T15:32:33,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741860_1036 (size=17906) 2024-11-17T15:32:33,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741860_1036 (size=17906) 2024-11-17T15:32:33,286 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/eba922b28154423db1971327f84fda43 2024-11-17T15:32:33,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/eba922b28154423db1971327f84fda43 as 
hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/eba922b28154423db1971327f84fda43 2024-11-17T15:32:33,295 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/eba922b28154423db1971327f84fda43, entries=12, sequenceid=171, filesize=17.5 K 2024-11-17T15:32:33,296 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=11.56 KB/11836 for c1a50fd4c077917cdbb56a8a483912c5 in 20ms, sequenceid=171, compaction requested=false 2024-11-17T15:32:33,296 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c1a50fd4c077917cdbb56a8a483912c5: 2024-11-17T15:32:33,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35813 {}] regionserver.HRegion(8855): Flush requested on c1a50fd4c077917cdbb56a8a483912c5 2024-11-17T15:32:33,296 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c1a50fd4c077917cdbb56a8a483912c5 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-17T15:32:33,300 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/ba474e12bcbd4c6d9b866cf905e2e957 is 1080, key is row0130/info:/1731857553277/Put/seqid=0 2024-11-17T15:32:33,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741861_1037 (size=17906) 2024-11-17T15:32:33,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741861_1037 (size=17906) 2024-11-17T15:32:33,320 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=186 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/ba474e12bcbd4c6d9b866cf905e2e957 2024-11-17T15:32:33,325 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/ba474e12bcbd4c6d9b866cf905e2e957 as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/ba474e12bcbd4c6d9b866cf905e2e957 2024-11-17T15:32:33,330 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/ba474e12bcbd4c6d9b866cf905e2e957, entries=12, sequenceid=186, filesize=17.5 K 2024-11-17T15:32:33,331 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=12.61 KB/12912 for c1a50fd4c077917cdbb56a8a483912c5 in 35ms, sequenceid=186, compaction requested=true 2024-11-17T15:32:33,331 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for c1a50fd4c077917cdbb56a8a483912c5: 2024-11-17T15:32:33,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a50fd4c077917cdbb56a8a483912c5:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T15:32:33,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T15:32:33,331 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T15:32:33,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35813 {}] regionserver.HRegion(8855): Flush requested on c1a50fd4c077917cdbb56a8a483912c5 2024-11-17T15:32:33,332 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c1a50fd4c077917cdbb56a8a483912c5 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-17T15:32:33,333 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101610 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T15:32:33,333 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HStore(1541): c1a50fd4c077917cdbb56a8a483912c5/info is initiating minor compaction (all files) 2024-11-17T15:32:33,333 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c1a50fd4c077917cdbb56a8a483912c5/info in TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5. 2024-11-17T15:32:33,333 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/878ea5e89f4e4b12a58ad0637cfba07f, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/eba922b28154423db1971327f84fda43, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/ba474e12bcbd4c6d9b866cf905e2e957] into tmpdir=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp, totalSize=99.2 K 2024-11-17T15:32:33,333 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.Compactor(225): Compacting 878ea5e89f4e4b12a58ad0637cfba07f, keycount=56, bloomtype=ROW, size=64.3 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1731857527020 2024-11-17T15:32:33,334 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.Compactor(225): Compacting eba922b28154423db1971327f84fda43, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1731857551258 2024-11-17T15:32:33,334 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.Compactor(225): Compacting ba474e12bcbd4c6d9b866cf905e2e957, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=186, earliestPutTs=1731857553277 2024-11-17T15:32:33,336 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/6bef22486423469abf498cfde8082f7a is 1080, key is row0142/info:/1731857553297/Put/seqid=0 2024-11-17T15:32:33,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741862_1038 (size=19000) 2024-11-17T15:32:33,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741862_1038 (size=19000) 2024-11-17T15:32:33,343 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/6bef22486423469abf498cfde8082f7a 2024-11-17T15:32:33,346 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a50fd4c077917cdbb56a8a483912c5#info#compaction#79 average throughput is 82.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T15:32:33,347 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/fd4342621a234e25b42da498ed968750 is 1080, key is row0062/info:/1731857527020/Put/seqid=0 2024-11-17T15:32:33,349 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/6bef22486423469abf498cfde8082f7a as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/6bef22486423469abf498cfde8082f7a 2024-11-17T15:32:33,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741863_1039 (size=91849) 2024-11-17T15:32:33,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741863_1039 (size=91849) 2024-11-17T15:32:33,355 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/6bef22486423469abf498cfde8082f7a, entries=13, sequenceid=202, filesize=18.6 K 2024-11-17T15:32:33,356 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=6.30 KB/6456 for c1a50fd4c077917cdbb56a8a483912c5 in 24ms, sequenceid=202, compaction requested=false 2024-11-17T15:32:33,356 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/fd4342621a234e25b42da498ed968750 as 
hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/fd4342621a234e25b42da498ed968750 2024-11-17T15:32:33,356 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c1a50fd4c077917cdbb56a8a483912c5: 2024-11-17T15:32:33,360 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c1a50fd4c077917cdbb56a8a483912c5/info of c1a50fd4c077917cdbb56a8a483912c5 into fd4342621a234e25b42da498ed968750(size=89.7 K), total size for store is 108.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T15:32:33,360 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c1a50fd4c077917cdbb56a8a483912c5: 2024-11-17T15:32:33,360 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5., storeName=c1a50fd4c077917cdbb56a8a483912c5/info, priority=13, startTime=1731857553331; duration=0sec 2024-11-17T15:32:33,360 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T15:32:33,360 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a50fd4c077917cdbb56a8a483912c5:info 2024-11-17T15:32:33,806 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:33,806 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:34,807 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:34,807 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:35,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35813 {}] regionserver.HRegion(8855): Flush requested on c1a50fd4c077917cdbb56a8a483912c5 2024-11-17T15:32:35,346 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c1a50fd4c077917cdbb56a8a483912c5 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-17T15:32:35,350 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/950d96977c454910b011b25ed5e0f8ab is 1080, key is row0155/info:/1731857553333/Put/seqid=0 2024-11-17T15:32:35,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741864_1040 (size=12516) 2024-11-17T15:32:35,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741864_1040 (size=12516) 2024-11-17T15:32:35,356 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/950d96977c454910b011b25ed5e0f8ab 2024-11-17T15:32:35,361 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/950d96977c454910b011b25ed5e0f8ab as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/950d96977c454910b011b25ed5e0f8ab 2024-11-17T15:32:35,366 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/950d96977c454910b011b25ed5e0f8ab, entries=7, sequenceid=213, filesize=12.2 K 2024-11-17T15:32:35,367 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for c1a50fd4c077917cdbb56a8a483912c5 in 21ms, sequenceid=213, compaction requested=true 2024-11-17T15:32:35,367 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c1a50fd4c077917cdbb56a8a483912c5: 2024-11-17T15:32:35,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a50fd4c077917cdbb56a8a483912c5:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T15:32:35,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T15:32:35,367 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T15:32:35,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35813 {}] regionserver.HRegion(8855): Flush requested on c1a50fd4c077917cdbb56a8a483912c5 2024-11-17T15:32:35,368 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c1a50fd4c077917cdbb56a8a483912c5 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-17T15:32:35,368 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 123365 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T15:32:35,368 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HStore(1541): c1a50fd4c077917cdbb56a8a483912c5/info is initiating minor compaction (all files) 2024-11-17T15:32:35,368 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c1a50fd4c077917cdbb56a8a483912c5/info in TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5. 2024-11-17T15:32:35,368 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/fd4342621a234e25b42da498ed968750, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/6bef22486423469abf498cfde8082f7a, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/950d96977c454910b011b25ed5e0f8ab] into tmpdir=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp, totalSize=120.5 K 2024-11-17T15:32:35,369 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.Compactor(225): Compacting fd4342621a234e25b42da498ed968750, keycount=80, bloomtype=ROW, size=89.7 K, encoding=NONE, compression=NONE, seqNum=186, earliestPutTs=1731857527020 2024-11-17T15:32:35,369 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6bef22486423469abf498cfde8082f7a, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1731857553297 2024-11-17T15:32:35,370 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.Compactor(225): Compacting 950d96977c454910b011b25ed5e0f8ab, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1731857553333 2024-11-17T15:32:35,371 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/d3fa5f5df9074e80b750213f1656eb38 is 1080, key is row0162/info:/1731857555347/Put/seqid=0 2024-11-17T15:32:35,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to 
blk_1073741865_1041 (size=19000) 2024-11-17T15:32:35,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741865_1041 (size=19000) 2024-11-17T15:32:35,378 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=229 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/d3fa5f5df9074e80b750213f1656eb38 2024-11-17T15:32:35,382 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a50fd4c077917cdbb56a8a483912c5#info#compaction#82 average throughput is 51.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T15:32:35,383 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/fcb83246921a4314ba301ae93acee4f9 is 1080, key is row0062/info:/1731857527020/Put/seqid=0 2024-11-17T15:32:35,383 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/d3fa5f5df9074e80b750213f1656eb38 as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/d3fa5f5df9074e80b750213f1656eb38 2024-11-17T15:32:35,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741866_1042 (size=113515) 2024-11-17T15:32:35,388 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/d3fa5f5df9074e80b750213f1656eb38, entries=13, sequenceid=229, filesize=18.6 K 2024-11-17T15:32:35,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741866_1042 (size=113515) 2024-11-17T15:32:35,389 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=12.61 KB/12912 for c1a50fd4c077917cdbb56a8a483912c5 in 21ms, sequenceid=229, compaction requested=false 2024-11-17T15:32:35,389 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c1a50fd4c077917cdbb56a8a483912c5: 2024-11-17T15:32:35,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35813 {}] regionserver.HRegion(8855): Flush requested on c1a50fd4c077917cdbb56a8a483912c5 2024-11-17T15:32:35,389 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c1a50fd4c077917cdbb56a8a483912c5 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-17T15:32:35,393 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/fcb83246921a4314ba301ae93acee4f9 as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/fcb83246921a4314ba301ae93acee4f9 2024-11-17T15:32:35,394 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/886b2dc3665445d4968ced2843e1b78d is 1080, key is row0175/info:/1731857555369/Put/seqid=0 2024-11-17T15:32:35,400 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c1a50fd4c077917cdbb56a8a483912c5/info of c1a50fd4c077917cdbb56a8a483912c5 into fcb83246921a4314ba301ae93acee4f9(size=110.9 K), total size for store is 129.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T15:32:35,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741867_1043 (size=19000) 2024-11-17T15:32:35,400 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c1a50fd4c077917cdbb56a8a483912c5: 2024-11-17T15:32:35,400 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5., storeName=c1a50fd4c077917cdbb56a8a483912c5/info, priority=13, startTime=1731857555367; duration=0sec 2024-11-17T15:32:35,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741867_1043 (size=19000) 2024-11-17T15:32:35,400 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T15:32:35,400 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a50fd4c077917cdbb56a8a483912c5:info 2024-11-17T15:32:35,704 INFO [master/7a780d55532c:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-17T15:32:35,704 INFO [master/7a780d55532c:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-17T15:32:35,801 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/886b2dc3665445d4968ced2843e1b78d 2024-11-17T15:32:35,806 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/886b2dc3665445d4968ced2843e1b78d as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/886b2dc3665445d4968ced2843e1b78d 2024-11-17T15:32:35,808 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:32:35,808 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:32:35,811 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/886b2dc3665445d4968ced2843e1b78d, entries=13, sequenceid=245, filesize=18.6 K 2024-11-17T15:32:35,812 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=5.25 KB/5380 for c1a50fd4c077917cdbb56a8a483912c5 in 423ms, sequenceid=245, compaction requested=true 2024-11-17T15:32:35,812 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c1a50fd4c077917cdbb56a8a483912c5: 2024-11-17T15:32:35,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a50fd4c077917cdbb56a8a483912c5:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T15:32:35,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T15:32:35,812 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T15:32:35,813 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 151515 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T15:32:35,813 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HStore(1541): c1a50fd4c077917cdbb56a8a483912c5/info is initiating minor compaction (all files) 2024-11-17T15:32:35,813 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c1a50fd4c077917cdbb56a8a483912c5/info in TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5. 
2024-11-17T15:32:35,813 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/fcb83246921a4314ba301ae93acee4f9, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/d3fa5f5df9074e80b750213f1656eb38, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/886b2dc3665445d4968ced2843e1b78d] into tmpdir=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp, totalSize=148.0 K 2024-11-17T15:32:35,813 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.Compactor(225): Compacting fcb83246921a4314ba301ae93acee4f9, keycount=100, bloomtype=ROW, size=110.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1731857527020 2024-11-17T15:32:35,814 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.Compactor(225): Compacting d3fa5f5df9074e80b750213f1656eb38, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=229, earliestPutTs=1731857555347 2024-11-17T15:32:35,814 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.Compactor(225): Compacting 886b2dc3665445d4968ced2843e1b78d, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1731857555369 2024-11-17T15:32:35,824 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a50fd4c077917cdbb56a8a483912c5#info#compaction#84 average throughput is 64.65 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T15:32:35,825 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/7ea19f86ba0c4a2e99080dddf808cef1 is 1080, key is row0062/info:/1731857527020/Put/seqid=0 2024-11-17T15:32:35,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741868_1044 (size=141850) 2024-11-17T15:32:35,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741868_1044 (size=141850) 2024-11-17T15:32:35,833 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/7ea19f86ba0c4a2e99080dddf808cef1 as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/7ea19f86ba0c4a2e99080dddf808cef1 2024-11-17T15:32:35,838 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c1a50fd4c077917cdbb56a8a483912c5/info of c1a50fd4c077917cdbb56a8a483912c5 into 7ea19f86ba0c4a2e99080dddf808cef1(size=138.5 K), total size for store is 138.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T15:32:35,838 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c1a50fd4c077917cdbb56a8a483912c5: 2024-11-17T15:32:35,838 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5., storeName=c1a50fd4c077917cdbb56a8a483912c5/info, priority=13, startTime=1731857555812; duration=0sec 2024-11-17T15:32:35,838 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T15:32:35,838 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a50fd4c077917cdbb56a8a483912c5:info 2024-11-17T15:32:36,808 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:36,808 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:37,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35813 {}] regionserver.HRegion(8855): Flush requested on c1a50fd4c077917cdbb56a8a483912c5 2024-11-17T15:32:37,401 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c1a50fd4c077917cdbb56a8a483912c5 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-17T15:32:37,406 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/9a6363d391244813a83e9df3c5b9fcfb is 1080, key is row0188/info:/1731857555390/Put/seqid=0 2024-11-17T15:32:37,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741869_1045 (size=12517) 2024-11-17T15:32:37,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741869_1045 (size=12517) 2024-11-17T15:32:37,412 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=257 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/9a6363d391244813a83e9df3c5b9fcfb 2024-11-17T15:32:37,417 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/9a6363d391244813a83e9df3c5b9fcfb as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/9a6363d391244813a83e9df3c5b9fcfb 2024-11-17T15:32:37,422 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/9a6363d391244813a83e9df3c5b9fcfb, entries=7, sequenceid=257, filesize=12.2 K 2024-11-17T15:32:37,423 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=15.76 KB/16140 for c1a50fd4c077917cdbb56a8a483912c5 in 22ms, sequenceid=257, compaction requested=false 2024-11-17T15:32:37,423 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c1a50fd4c077917cdbb56a8a483912c5: 2024-11-17T15:32:37,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35813 {}] regionserver.HRegion(8855): Flush requested on c1a50fd4c077917cdbb56a8a483912c5 2024-11-17T15:32:37,425 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c1a50fd4c077917cdbb56a8a483912c5 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 
2024-11-17T15:32:37,429 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/aa1d742b25e742aaa6eec9cf39258235 is 1080, key is row0195/info:/1731857557402/Put/seqid=0 2024-11-17T15:32:37,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741870_1046 (size=22254) 2024-11-17T15:32:37,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741870_1046 (size=22254) 2024-11-17T15:32:37,435 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/aa1d742b25e742aaa6eec9cf39258235 2024-11-17T15:32:37,440 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/aa1d742b25e742aaa6eec9cf39258235 as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/aa1d742b25e742aaa6eec9cf39258235 2024-11-17T15:32:37,445 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/aa1d742b25e742aaa6eec9cf39258235, entries=16, sequenceid=276, filesize=21.7 K 2024-11-17T15:32:37,445 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35813 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=c1a50fd4c077917cdbb56a8a483912c5, server=7a780d55532c,35813,1731857514065 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-17T15:32:37,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35813 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:50806 deadline: 1731857567445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=c1a50fd4c077917cdbb56a8a483912c5, server=7a780d55532c,35813,1731857514065 2024-11-17T15:32:37,446 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=13.66 KB/13988 for c1a50fd4c077917cdbb56a8a483912c5 in 21ms, sequenceid=276, compaction requested=true 2024-11-17T15:32:37,446 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c1a50fd4c077917cdbb56a8a483912c5: 2024-11-17T15:32:37,446 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5., hostname=7a780d55532c,35813,1731857514065, seqNum=89 , the old value is region=TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5., hostname=7a780d55532c,35813,1731857514065, seqNum=89, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=c1a50fd4c077917cdbb56a8a483912c5, server=7a780d55532c,35813,1731857514065 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-17T15:32:37,446 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5., hostname=7a780d55532c,35813,1731857514065, seqNum=89 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=c1a50fd4c077917cdbb56a8a483912c5, server=7a780d55532c,35813,1731857514065 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at 
org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-17T15:32:37,446 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5., hostname=7a780d55532c,35813,1731857514065, seqNum=89 because the exception is null or not the one we care about 2024-11-17T15:32:37,446 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a50fd4c077917cdbb56a8a483912c5:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T15:32:37,446 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T15:32:37,446 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T15:32:37,447 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 176621 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T15:32:37,447 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HStore(1541): c1a50fd4c077917cdbb56a8a483912c5/info is initiating minor compaction (all files) 2024-11-17T15:32:37,447 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c1a50fd4c077917cdbb56a8a483912c5/info in TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5. 
2024-11-17T15:32:37,448 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/7ea19f86ba0c4a2e99080dddf808cef1, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/9a6363d391244813a83e9df3c5b9fcfb, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/aa1d742b25e742aaa6eec9cf39258235] into tmpdir=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp, totalSize=172.5 K 2024-11-17T15:32:37,448 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7ea19f86ba0c4a2e99080dddf808cef1, keycount=126, bloomtype=ROW, size=138.5 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1731857527020 2024-11-17T15:32:37,448 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9a6363d391244813a83e9df3c5b9fcfb, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=257, earliestPutTs=1731857555390 2024-11-17T15:32:37,448 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.Compactor(225): Compacting aa1d742b25e742aaa6eec9cf39258235, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1731857557402 2024-11-17T15:32:37,458 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a50fd4c077917cdbb56a8a483912c5#info#compaction#87 average throughput is 76.45 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T15:32:37,459 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/7a3fb3d95a614fe4beb3c3e91b262686 is 1080, key is row0062/info:/1731857527020/Put/seqid=0 2024-11-17T15:32:37,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741871_1047 (size=166767) 2024-11-17T15:32:37,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741871_1047 (size=166767) 2024-11-17T15:32:37,467 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/7a3fb3d95a614fe4beb3c3e91b262686 as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/7a3fb3d95a614fe4beb3c3e91b262686 2024-11-17T15:32:37,472 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c1a50fd4c077917cdbb56a8a483912c5/info of c1a50fd4c077917cdbb56a8a483912c5 into 7a3fb3d95a614fe4beb3c3e91b262686(size=162.9 K), total size for store is 162.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T15:32:37,472 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c1a50fd4c077917cdbb56a8a483912c5: 2024-11-17T15:32:37,472 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5., storeName=c1a50fd4c077917cdbb56a8a483912c5/info, priority=13, startTime=1731857557446; duration=0sec 2024-11-17T15:32:37,472 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T15:32:37,472 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a50fd4c077917cdbb56a8a483912c5:info 2024-11-17T15:32:37,809 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:37,809 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:38,809 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:38,809 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:39,810 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:39,810 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:39,813 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-11-17T15:32:40,810 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:40,810 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:41,811 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:41,811 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:42,811 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:42,811 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:43,812 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:43,812 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:44,813 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:44,813 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:45,813 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:45,813 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:46,814 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:46,814 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:47,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35813 {}] regionserver.HRegion(8855): Flush requested on c1a50fd4c077917cdbb56a8a483912c5 2024-11-17T15:32:47,527 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c1a50fd4c077917cdbb56a8a483912c5 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-17T15:32:47,531 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/be40d651ea4a442092f49102e5b363e8 is 1080, key is row0211/info:/1731857557425/Put/seqid=0 2024-11-17T15:32:47,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741872_1048 (size=20092) 2024-11-17T15:32:47,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741872_1048 (size=20092) 2024-11-17T15:32:47,536 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/be40d651ea4a442092f49102e5b363e8 2024-11-17T15:32:47,541 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/be40d651ea4a442092f49102e5b363e8 as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/be40d651ea4a442092f49102e5b363e8 2024-11-17T15:32:47,545 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/be40d651ea4a442092f49102e5b363e8, entries=14, sequenceid=294, filesize=19.6 K 2024-11-17T15:32:47,546 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=0 B/0 for c1a50fd4c077917cdbb56a8a483912c5 in 19ms, sequenceid=294, compaction requested=false 2024-11-17T15:32:47,546 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c1a50fd4c077917cdbb56a8a483912c5: 2024-11-17T15:32:47,814 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:47,814 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:48,815 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:48,815 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:32:49,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35813 {}] regionserver.HRegion(8855): Flush requested on c1a50fd4c077917cdbb56a8a483912c5 2024-11-17T15:32:49,537 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c1a50fd4c077917cdbb56a8a483912c5 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-17T15:32:49,541 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/d79b19f2b3d549148338301160a87ad3 is 1080, key is row0225/info:/1731857569528/Put/seqid=0 2024-11-17T15:32:49,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741873_1049 (size=12523) 2024-11-17T15:32:49,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741873_1049 (size=12523) 2024-11-17T15:32:49,550 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=304 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/d79b19f2b3d549148338301160a87ad3 2024-11-17T15:32:49,555 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/d79b19f2b3d549148338301160a87ad3 as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/d79b19f2b3d549148338301160a87ad3 2024-11-17T15:32:49,560 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/d79b19f2b3d549148338301160a87ad3, entries=7, sequenceid=304, filesize=12.2 K 2024-11-17T15:32:49,561 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=16.81 KB/17216 for c1a50fd4c077917cdbb56a8a483912c5 in 24ms, sequenceid=304, compaction requested=true 2024-11-17T15:32:49,561 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c1a50fd4c077917cdbb56a8a483912c5: 2024-11-17T15:32:49,561 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1a50fd4c077917cdbb56a8a483912c5:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T15:32:49,561 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T15:32:49,561 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T15:32:49,562 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 199382 starting at candidate #0 after considering 1 
permutations with 1 in ratio 2024-11-17T15:32:49,562 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HStore(1541): c1a50fd4c077917cdbb56a8a483912c5/info is initiating minor compaction (all files) 2024-11-17T15:32:49,562 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c1a50fd4c077917cdbb56a8a483912c5/info in TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5. 2024-11-17T15:32:49,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35813 {}] regionserver.HRegion(8855): Flush requested on c1a50fd4c077917cdbb56a8a483912c5 2024-11-17T15:32:49,562 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/7a3fb3d95a614fe4beb3c3e91b262686, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/be40d651ea4a442092f49102e5b363e8, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/d79b19f2b3d549148338301160a87ad3] into tmpdir=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp, totalSize=194.7 K 2024-11-17T15:32:49,562 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c1a50fd4c077917cdbb56a8a483912c5 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-11-17T15:32:49,563 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7a3fb3d95a614fe4beb3c3e91b262686, keycount=149, bloomtype=ROW, size=162.9 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1731857527020 2024-11-17T15:32:49,563 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.Compactor(225): Compacting be40d651ea4a442092f49102e5b363e8, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1731857557425 2024-11-17T15:32:49,563 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] compactions.Compactor(225): Compacting d79b19f2b3d549148338301160a87ad3, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1731857569528 2024-11-17T15:32:49,566 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/d8c1dc35fda54d5188b3ad75b6d45b69 is 1080, key is row0232/info:/1731857569538/Put/seqid=0 2024-11-17T15:32:49,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741874_1050 (size=23333) 2024-11-17T15:32:49,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741874_1050 (size=23333) 2024-11-17T15:32:49,573 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=324 (bloomFilter=true), 
to=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/d8c1dc35fda54d5188b3ad75b6d45b69 2024-11-17T15:32:49,577 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1a50fd4c077917cdbb56a8a483912c5#info#compaction#91 average throughput is 58.15 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T15:32:49,577 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/ed0fe220ba944ab0aab42e79fab47b1a is 1080, key is row0062/info:/1731857527020/Put/seqid=0 2024-11-17T15:32:49,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/d8c1dc35fda54d5188b3ad75b6d45b69 as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/d8c1dc35fda54d5188b3ad75b6d45b69 2024-11-17T15:32:49,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741875_1051 (size=189536) 2024-11-17T15:32:49,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741875_1051 (size=189536) 2024-11-17T15:32:49,584 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/d8c1dc35fda54d5188b3ad75b6d45b69, entries=17, sequenceid=324, filesize=22.8 K 2024-11-17T15:32:49,585 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=8.41 KB/8608 for c1a50fd4c077917cdbb56a8a483912c5 in 23ms, sequenceid=324, compaction requested=false 2024-11-17T15:32:49,585 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c1a50fd4c077917cdbb56a8a483912c5: 2024-11-17T15:32:49,589 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/ed0fe220ba944ab0aab42e79fab47b1a as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/ed0fe220ba944ab0aab42e79fab47b1a 2024-11-17T15:32:49,594 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c1a50fd4c077917cdbb56a8a483912c5/info of c1a50fd4c077917cdbb56a8a483912c5 into ed0fe220ba944ab0aab42e79fab47b1a(size=185.1 K), total size for store is 207.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
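For readers following the compaction decision recorded above (three store files of roughly 162.9 K, 19.6 K and 12.2 K selected into one minor compaction totalling 199382 bytes), a ratio-based selection of that general shape can be illustrated with the small, self-contained Java sketch below. This is an editor-added illustration only: the ratio value, the windowed search and the class name are assumptions chosen to mirror what the log reports, not the HBase ExploringCompactionPolicy source.

import java.util.ArrayList;
import java.util.List;

/** Simplified illustration of ratio-based store-file selection (not HBase code). */
public class CompactionSelectionSketch {

    // A candidate set qualifies if no single file is larger than ratio * (sum of the others).
    static boolean withinRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long s : sizes) {
            if (s > ratio * (total - s)) {
                return false;
            }
        }
        return true;
    }

    // Pick the largest contiguous window of files (at least two) that satisfies the ratio check.
    static List<Long> select(List<Long> sizes, double ratio) {
        List<Long> best = new ArrayList<>();
        for (int start = 0; start < sizes.size(); start++) {
            for (int end = start + 2; end <= sizes.size(); end++) {
                List<Long> window = sizes.subList(start, end);
                if (withinRatio(window, ratio) && window.size() > best.size()) {
                    best = new ArrayList<>(window);
                }
            }
        }
        return best;
    }

    public static void main(String[] args) {
        // Byte sizes chosen so the three files sum to the 199382 bytes reported in the log above.
        List<Long> sizes = List.of(166_767L, 20_092L, 12_523L);
        List<Long> chosen = select(sizes, 10.0);   // the ratio of 10 is an assumption
        System.out.println("Selected " + chosen.size() + " files, total "
            + chosen.stream().mapToLong(Long::longValue).sum() + " bytes");
    }
}

Run as-is, the sketch selects all three files, matching the "selected 3 files of size 199382" line above; the real policy explores more permutations and more constraints, but the ratio test is the core idea.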
2024-11-17T15:32:49,594 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c1a50fd4c077917cdbb56a8a483912c5: 2024-11-17T15:32:49,594 INFO [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5., storeName=c1a50fd4c077917cdbb56a8a483912c5/info, priority=13, startTime=1731857569561; duration=0sec 2024-11-17T15:32:49,594 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T15:32:49,594 DEBUG [RS:0;7a780d55532c:35813-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1a50fd4c077917cdbb56a8a483912c5:info 2024-11-17T15:32:49,815 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T15:32:49,815 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:50,816 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:50,816 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-17T15:32:51,575 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files
2024-11-17T15:32:51,575 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C35813%2C1731857514065.1731857571575
2024-11-17T15:32:51,581 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:32:51,581 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:32:51,581 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:32:51,582 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:32:51,582 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:32:51,582 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/WALs/7a780d55532c,35813,1731857514065/7a780d55532c%2C35813%2C1731857514065.1731857514443 with entries=313, filesize=308.60 KB; new WAL /user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/WALs/7a780d55532c,35813,1731857514065/7a780d55532c%2C35813%2C1731857514065.1731857571575
2024-11-17T15:32:51,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741833_1009 (size=316019)
2024-11-17T15:32:51,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741833_1009 (size=316019)
2024-11-17T15:32:51,589 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41199:41199),(127.0.0.1/127.0.0.1:45857:45857)]
2024-11-17T15:32:51,593 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing c1a50fd4c077917cdbb56a8a483912c5 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB
2024-11-17T15:32:51,597 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/429aba9647e241579092627c40bc2c97 is 1080, key is row0249/info:/1731857569563/Put/seqid=0
2024-11-17T15:32:51,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741877_1053 (size=13602)
2024-11-17T15:32:51,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741877_1053 (size=13602)
2024-11-17T15:32:51,601 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/429aba9647e241579092627c40bc2c97
2024-11-17T15:32:51,606 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/.tmp/info/429aba9647e241579092627c40bc2c97 as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/429aba9647e241579092627c40bc2c97
2024-11-17T15:32:51,609 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/429aba9647e241579092627c40bc2c97, entries=8, sequenceid=336, filesize=13.3 K
2024-11-17T15:32:51,610 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=0 B/0 for c1a50fd4c077917cdbb56a8a483912c5 in 17ms, sequenceid=336, compaction requested=true
2024-11-17T15:32:51,610 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for c1a50fd4c077917cdbb56a8a483912c5:
2024-11-17T15:32:51,611 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB
2024-11-17T15:32:51,614 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/hbase/meta/1588230740/.tmp/info/b9c56f94db56404c9e0583e7cb0d5b9c is 193, key is TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5./info:regioninfo/1731857527763/Put/seqid=0
2024-11-17T15:32:51,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741878_1054 (size=6223)
2024-11-17T15:32:51,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741878_1054 (size=6223)
2024-11-17T15:32:51,618 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/hbase/meta/1588230740/.tmp/info/b9c56f94db56404c9e0583e7cb0d5b9c
2024-11-17T15:32:51,623 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/hbase/meta/1588230740/.tmp/info/b9c56f94db56404c9e0583e7cb0d5b9c as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/hbase/meta/1588230740/info/b9c56f94db56404c9e0583e7cb0d5b9c
2024-11-17T15:32:51,626 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/hbase/meta/1588230740/info/b9c56f94db56404c9e0583e7cb0d5b9c, entries=5, sequenceid=21, filesize=6.1 K
2024-11-17T15:32:51,627 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 17ms, sequenceid=21, compaction requested=false
2024-11-17T15:32:51,627 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740:
2024-11-17T15:32:51,628 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 4a33dad81f9236978607e15daf66bfbd:
2024-11-17T15:32:51,628 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C35813%2C1731857514065.1731857571628
2024-11-17T15:32:51,633 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:32:51,633 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:32:51,633 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:32:51,633 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:32:51,633 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T15:32:51,633 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/WALs/7a780d55532c,35813,1731857514065/7a780d55532c%2C35813%2C1731857514065.1731857571575 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/WALs/7a780d55532c,35813,1731857514065/7a780d55532c%2C35813%2C1731857514065.1731857571628
2024-11-17T15:32:51,634 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41199:41199),(127.0.0.1/127.0.0.1:45857:45857)]
2024-11-17T15:32:51,634 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/WALs/7a780d55532c,35813,1731857514065/7a780d55532c%2C35813%2C1731857514065.1731857571575 is not closed yet, will try archiving it next time
2024-11-17T15:32:51,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741876_1052 (size=731)
2024-11-17T15:32:51,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741876_1052 (size=731)
2024-11-17T15:32:51,636 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/WALs/7a780d55532c,35813,1731857514065/7a780d55532c%2C35813%2C1731857514065.1731857514443 to hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/oldWALs/7a780d55532c%2C35813%2C1731857514065.1731857514443
2024-11-17T15:32:51,637 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1])
2024-11-17T15:32:51,637 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-11-17T15:32:51,637 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-17T15:32:51,637 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T15:32:51,637 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:32:51,637 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:32:51,637 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-17T15:32:51,637 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/WALs/7a780d55532c,35813,1731857514065/7a780d55532c%2C35813%2C1731857514065.1731857571575 to hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/oldWALs/7a780d55532c%2C35813%2C1731857514065.1731857571575 2024-11-17T15:32:51,638 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-17T15:32:51,638 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=317888390, stopped=false 2024-11-17T15:32:51,638 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7a780d55532c,39309,1731857514009 2024-11-17T15:32:51,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35813-0x101268e68960001, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T15:32:51,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39309-0x101268e68960000, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T15:32:51,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35813-0x101268e68960001, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:32:51,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39309-0x101268e68960000, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:32:51,639 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T15:32:51,639 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-17T15:32:51,640 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T15:32:51,640 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:32:51,640 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '7a780d55532c,35813,1731857514065' ***** 2024-11-17T15:32:51,640 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-17T15:32:51,640 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35813-0x101268e68960001, quorum=127.0.0.1:64124, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T15:32:51,640 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39309-0x101268e68960000, quorum=127.0.0.1:64124, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T15:32:51,640 INFO [RS:0;7a780d55532c:35813 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-17T15:32:51,640 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-17T15:32:51,641 INFO [RS:0;7a780d55532c:35813 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-17T15:32:51,641 INFO [RS:0;7a780d55532c:35813 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-17T15:32:51,641 INFO [RS:0;7a780d55532c:35813 {}] regionserver.HRegionServer(3091): Received CLOSE for c1a50fd4c077917cdbb56a8a483912c5 2024-11-17T15:32:51,641 INFO [RS:0;7a780d55532c:35813 {}] regionserver.HRegionServer(3091): Received CLOSE for 4a33dad81f9236978607e15daf66bfbd 2024-11-17T15:32:51,641 INFO [RS:0;7a780d55532c:35813 {}] regionserver.HRegionServer(959): stopping server 7a780d55532c,35813,1731857514065 2024-11-17T15:32:51,641 INFO [RS:0;7a780d55532c:35813 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T15:32:51,641 INFO [RS:0;7a780d55532c:35813 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7a780d55532c:35813. 
2024-11-17T15:32:51,641 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing c1a50fd4c077917cdbb56a8a483912c5, disabling compactions & flushes 2024-11-17T15:32:51,641 DEBUG [RS:0;7a780d55532c:35813 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T15:32:51,641 DEBUG [RS:0;7a780d55532c:35813 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:32:51,641 INFO [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5. 2024-11-17T15:32:51,641 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5. 2024-11-17T15:32:51,641 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5. after waiting 0 ms 2024-11-17T15:32:51,641 INFO [RS:0;7a780d55532c:35813 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-17T15:32:51,641 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5. 2024-11-17T15:32:51,641 INFO [RS:0;7a780d55532c:35813 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-17T15:32:51,641 INFO [RS:0;7a780d55532c:35813 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-17T15:32:51,641 INFO [RS:0;7a780d55532c:35813 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-17T15:32:51,641 INFO [RS:0;7a780d55532c:35813 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-17T15:32:51,641 DEBUG [RS:0;7a780d55532c:35813 {}] regionserver.HRegionServer(1325): Online Regions={c1a50fd4c077917cdbb56a8a483912c5=TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5., 1588230740=hbase:meta,,1.1588230740, 4a33dad81f9236978607e15daf66bfbd=TestLogRolling-testLogRolling,,1731857527087.4a33dad81f9236978607e15daf66bfbd.} 2024-11-17T15:32:51,641 DEBUG [RS:0;7a780d55532c:35813 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 4a33dad81f9236978607e15daf66bfbd, c1a50fd4c077917cdbb56a8a483912c5 2024-11-17T15:32:51,641 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T15:32:51,642 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T15:32:51,642 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T15:32:51,642 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T15:32:51,642 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T15:32:51,642 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/a54994361c5941d0bcb17b7c2035107b.72a877004756a59e58089feba8324f4a->hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/a54994361c5941d0bcb17b7c2035107b-top, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/8b4eb685df914825b32457ff86be01e6, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/TestLogRolling-testLogRolling=72a877004756a59e58089feba8324f4a-dae32ade9b9f4e8ba8e9e5fcf9efd74f, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/1cd3e0becce84685bb68f2c09f5d41da, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/9c295373a667476d9cc9217fe20b1bd4, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/f0591d7295d143efb78d1aa10490cbe1, 
hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/665d183b190342359598e1173014f8dc, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/878ea5e89f4e4b12a58ad0637cfba07f, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/016773c0f2fc48e680bedde6713c7a44, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/eba922b28154423db1971327f84fda43, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/fd4342621a234e25b42da498ed968750, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/ba474e12bcbd4c6d9b866cf905e2e957, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/6bef22486423469abf498cfde8082f7a, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/fcb83246921a4314ba301ae93acee4f9, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/950d96977c454910b011b25ed5e0f8ab, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/d3fa5f5df9074e80b750213f1656eb38, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/7ea19f86ba0c4a2e99080dddf808cef1, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/886b2dc3665445d4968ced2843e1b78d, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/9a6363d391244813a83e9df3c5b9fcfb, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/7a3fb3d95a614fe4beb3c3e91b262686, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/aa1d742b25e742aaa6eec9cf39258235, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/be40d651ea4a442092f49102e5b363e8, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/d79b19f2b3d549148338301160a87ad3] to archive 2024-11-17T15:32:51,643 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5.-1 {}] backup.HFileArchiver(360): Archiving 
compacted files. 2024-11-17T15:32:51,644 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/a54994361c5941d0bcb17b7c2035107b.72a877004756a59e58089feba8324f4a to hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/archive/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/a54994361c5941d0bcb17b7c2035107b.72a877004756a59e58089feba8324f4a 2024-11-17T15:32:51,646 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/8b4eb685df914825b32457ff86be01e6 to hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/archive/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/8b4eb685df914825b32457ff86be01e6 2024-11-17T15:32:51,647 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/TestLogRolling-testLogRolling=72a877004756a59e58089feba8324f4a-dae32ade9b9f4e8ba8e9e5fcf9efd74f to hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/archive/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/TestLogRolling-testLogRolling=72a877004756a59e58089feba8324f4a-dae32ade9b9f4e8ba8e9e5fcf9efd74f 2024-11-17T15:32:51,647 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-17T15:32:51,648 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T15:32:51,648 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T15:32:51,648 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731857571641Running coprocessor pre-close hooks at 1731857571641Disabling compacts and flushes for region at 1731857571641Disabling writes for close at 1731857571642 (+1 ms)Writing region close event to WAL at 1731857571644 (+2 ms)Running coprocessor post-close hooks at 1731857571648 (+4 ms)Closed at 1731857571648 2024-11-17T15:32:51,648 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/1cd3e0becce84685bb68f2c09f5d41da to hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/archive/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/1cd3e0becce84685bb68f2c09f5d41da 2024-11-17T15:32:51,648 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-17T15:32:51,649 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/9c295373a667476d9cc9217fe20b1bd4 to hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/archive/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/9c295373a667476d9cc9217fe20b1bd4 2024-11-17T15:32:51,650 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/f0591d7295d143efb78d1aa10490cbe1 to hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/archive/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/f0591d7295d143efb78d1aa10490cbe1 2024-11-17T15:32:51,651 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/665d183b190342359598e1173014f8dc to hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/archive/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/665d183b190342359598e1173014f8dc 2024-11-17T15:32:51,652 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/878ea5e89f4e4b12a58ad0637cfba07f to hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/archive/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/878ea5e89f4e4b12a58ad0637cfba07f 2024-11-17T15:32:51,653 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/016773c0f2fc48e680bedde6713c7a44 to 
hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/archive/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/016773c0f2fc48e680bedde6713c7a44 2024-11-17T15:32:51,654 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/eba922b28154423db1971327f84fda43 to hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/archive/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/eba922b28154423db1971327f84fda43 2024-11-17T15:32:51,655 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/fd4342621a234e25b42da498ed968750 to hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/archive/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/fd4342621a234e25b42da498ed968750 2024-11-17T15:32:51,656 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/ba474e12bcbd4c6d9b866cf905e2e957 to hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/archive/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/ba474e12bcbd4c6d9b866cf905e2e957 2024-11-17T15:32:51,657 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/6bef22486423469abf498cfde8082f7a to hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/archive/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/6bef22486423469abf498cfde8082f7a 2024-11-17T15:32:51,658 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/fcb83246921a4314ba301ae93acee4f9 to hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/archive/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/fcb83246921a4314ba301ae93acee4f9 2024-11-17T15:32:51,659 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/950d96977c454910b011b25ed5e0f8ab to hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/archive/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/950d96977c454910b011b25ed5e0f8ab 2024-11-17T15:32:51,660 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/d3fa5f5df9074e80b750213f1656eb38 to hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/archive/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/d3fa5f5df9074e80b750213f1656eb38 2024-11-17T15:32:51,661 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/7ea19f86ba0c4a2e99080dddf808cef1 to hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/archive/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/7ea19f86ba0c4a2e99080dddf808cef1 2024-11-17T15:32:51,662 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/886b2dc3665445d4968ced2843e1b78d to hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/archive/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/886b2dc3665445d4968ced2843e1b78d 2024-11-17T15:32:51,663 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/9a6363d391244813a83e9df3c5b9fcfb to hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/archive/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/9a6363d391244813a83e9df3c5b9fcfb 2024-11-17T15:32:51,664 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/7a3fb3d95a614fe4beb3c3e91b262686 to hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/archive/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/7a3fb3d95a614fe4beb3c3e91b262686 2024-11-17T15:32:51,665 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/aa1d742b25e742aaa6eec9cf39258235 to hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/archive/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/aa1d742b25e742aaa6eec9cf39258235 2024-11-17T15:32:51,666 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/be40d651ea4a442092f49102e5b363e8 to hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/archive/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/be40d651ea4a442092f49102e5b363e8 2024-11-17T15:32:51,667 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/d79b19f2b3d549148338301160a87ad3 to hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/archive/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/info/d79b19f2b3d549148338301160a87ad3 2024-11-17T15:32:51,667 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=7a780d55532c:39309 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-17T15:32:51,667 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [8b4eb685df914825b32457ff86be01e6=8359, 1cd3e0becce84685bb68f2c09f5d41da=12509, 9c295373a667476d9cc9217fe20b1bd4=40835, f0591d7295d143efb78d1aa10490cbe1=29761, 665d183b190342359598e1173014f8dc=12515, 878ea5e89f4e4b12a58ad0637cfba07f=65798, 016773c0f2fc48e680bedde6713c7a44=22238, eba922b28154423db1971327f84fda43=17906, fd4342621a234e25b42da498ed968750=91849, ba474e12bcbd4c6d9b866cf905e2e957=17906, 6bef22486423469abf498cfde8082f7a=19000, fcb83246921a4314ba301ae93acee4f9=113515, 950d96977c454910b011b25ed5e0f8ab=12516, d3fa5f5df9074e80b750213f1656eb38=19000, 7ea19f86ba0c4a2e99080dddf808cef1=141850, 886b2dc3665445d4968ced2843e1b78d=19000, 9a6363d391244813a83e9df3c5b9fcfb=12517, 7a3fb3d95a614fe4beb3c3e91b262686=166767, aa1d742b25e742aaa6eec9cf39258235=22254, be40d651ea4a442092f49102e5b363e8=20092, d79b19f2b3d549148338301160a87ad3=12523] 2024-11-17T15:32:51,671 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/c1a50fd4c077917cdbb56a8a483912c5/recovered.edits/339.seqid, newMaxSeqId=339, maxSeqId=88 2024-11-17T15:32:51,671 INFO [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5. 2024-11-17T15:32:51,671 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for c1a50fd4c077917cdbb56a8a483912c5: Waiting for close lock at 1731857571641Running coprocessor pre-close hooks at 1731857571641Disabling compacts and flushes for region at 1731857571641Disabling writes for close at 1731857571641Writing region close event to WAL at 1731857571668 (+27 ms)Running coprocessor post-close hooks at 1731857571671 (+3 ms)Closed at 1731857571671 2024-11-17T15:32:51,671 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731857527087.c1a50fd4c077917cdbb56a8a483912c5. 2024-11-17T15:32:51,671 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 4a33dad81f9236978607e15daf66bfbd, disabling compactions & flushes 2024-11-17T15:32:51,671 INFO [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731857527087.4a33dad81f9236978607e15daf66bfbd. 2024-11-17T15:32:51,671 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731857527087.4a33dad81f9236978607e15daf66bfbd. 2024-11-17T15:32:51,671 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731857527087.4a33dad81f9236978607e15daf66bfbd. 
after waiting 0 ms 2024-11-17T15:32:51,671 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731857527087.4a33dad81f9236978607e15daf66bfbd. 2024-11-17T15:32:51,672 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731857527087.4a33dad81f9236978607e15daf66bfbd.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/4a33dad81f9236978607e15daf66bfbd/info/a54994361c5941d0bcb17b7c2035107b.72a877004756a59e58089feba8324f4a->hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/72a877004756a59e58089feba8324f4a/info/a54994361c5941d0bcb17b7c2035107b-bottom] to archive 2024-11-17T15:32:51,672 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731857527087.4a33dad81f9236978607e15daf66bfbd.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-17T15:32:51,673 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731857527087.4a33dad81f9236978607e15daf66bfbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/4a33dad81f9236978607e15daf66bfbd/info/a54994361c5941d0bcb17b7c2035107b.72a877004756a59e58089feba8324f4a to hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/archive/data/default/TestLogRolling-testLogRolling/4a33dad81f9236978607e15daf66bfbd/info/a54994361c5941d0bcb17b7c2035107b.72a877004756a59e58089feba8324f4a 2024-11-17T15:32:51,673 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731857527087.4a33dad81f9236978607e15daf66bfbd.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-17T15:32:51,676 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/data/default/TestLogRolling-testLogRolling/4a33dad81f9236978607e15daf66bfbd/recovered.edits/93.seqid, newMaxSeqId=93, maxSeqId=88 2024-11-17T15:32:51,677 INFO [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731857527087.4a33dad81f9236978607e15daf66bfbd. 2024-11-17T15:32:51,677 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 4a33dad81f9236978607e15daf66bfbd: Waiting for close lock at 1731857571671Running coprocessor pre-close hooks at 1731857571671Disabling compacts and flushes for region at 1731857571671Disabling writes for close at 1731857571671Writing region close event to WAL at 1731857571674 (+3 ms)Running coprocessor post-close hooks at 1731857571677 (+3 ms)Closed at 1731857571677 2024-11-17T15:32:51,677 DEBUG [RS_CLOSE_REGION-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731857527087.4a33dad81f9236978607e15daf66bfbd. 
2024-11-17T15:32:51,816 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:51,816 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:51,842 INFO [RS:0;7a780d55532c:35813 {}] regionserver.HRegionServer(976): stopping server 7a780d55532c,35813,1731857514065; all regions closed. 2024-11-17T15:32:51,842 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:51,842 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:51,842 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:51,842 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:51,842 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:51,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741834_1010 (size=8107) 2024-11-17T15:32:51,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741834_1010 (size=8107) 2024-11-17T15:32:51,846 DEBUG [RS:0;7a780d55532c:35813 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/oldWALs 2024-11-17T15:32:51,846 INFO [RS:0;7a780d55532c:35813 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7a780d55532c%2C35813%2C1731857514065.meta:.meta(num 1731857514796) 2024-11-17T15:32:51,847 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:51,847 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:51,847 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:51,847 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:51,847 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:51,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741879_1055 (size=778) 2024-11-17T15:32:51,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741879_1055 (size=778) 2024-11-17T15:32:51,851 DEBUG [RS:0;7a780d55532c:35813 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/oldWALs 2024-11-17T15:32:51,851 INFO [RS:0;7a780d55532c:35813 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7a780d55532c%2C35813%2C1731857514065:(num 1731857571628) 2024-11-17T15:32:51,851 DEBUG [RS:0;7a780d55532c:35813 {}] ipc.AbstractRpcClient(514): 
Stopping rpc client 2024-11-17T15:32:51,851 INFO [RS:0;7a780d55532c:35813 {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T15:32:51,851 INFO [RS:0;7a780d55532c:35813 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T15:32:51,851 INFO [RS:0;7a780d55532c:35813 {}] hbase.ChoreService(370): Chore service for: regionserver/7a780d55532c:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-17T15:32:51,851 INFO [RS:0;7a780d55532c:35813 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T15:32:51,851 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-17T15:32:51,851 INFO [RS:0;7a780d55532c:35813 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35813 2024-11-17T15:32:51,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35813-0x101268e68960001, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7a780d55532c,35813,1731857514065 2024-11-17T15:32:51,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39309-0x101268e68960000, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T15:32:51,853 INFO [RS:0;7a780d55532c:35813 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T15:32:51,855 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7a780d55532c,35813,1731857514065] 2024-11-17T15:32:51,857 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7a780d55532c,35813,1731857514065 already deleted, retry=false 2024-11-17T15:32:51,857 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7a780d55532c,35813,1731857514065 expired; onlineServers=0 2024-11-17T15:32:51,857 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7a780d55532c,39309,1731857514009' ***** 2024-11-17T15:32:51,857 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-17T15:32:51,857 INFO [M:0;7a780d55532c:39309 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T15:32:51,857 INFO [M:0;7a780d55532c:39309 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T15:32:51,857 DEBUG [M:0;7a780d55532c:39309 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-17T15:32:51,857 DEBUG [M:0;7a780d55532c:39309 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-17T15:32:51,857 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-17T15:32:51,857 DEBUG [master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.large.0-1731857514202 {}] cleaner.HFileCleaner(306): Exit Thread[master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.large.0-1731857514202,5,FailOnTimeoutGroup] 2024-11-17T15:32:51,857 DEBUG [master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.small.0-1731857514202 {}] cleaner.HFileCleaner(306): Exit Thread[master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.small.0-1731857514202,5,FailOnTimeoutGroup] 2024-11-17T15:32:51,857 INFO [M:0;7a780d55532c:39309 {}] hbase.ChoreService(370): Chore service for: master/7a780d55532c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-17T15:32:51,858 INFO [M:0;7a780d55532c:39309 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T15:32:51,858 DEBUG [M:0;7a780d55532c:39309 {}] master.HMaster(1795): Stopping service threads 2024-11-17T15:32:51,858 INFO [M:0;7a780d55532c:39309 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-17T15:32:51,858 INFO [M:0;7a780d55532c:39309 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T15:32:51,858 INFO [M:0;7a780d55532c:39309 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-17T15:32:51,858 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-17T15:32:51,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39309-0x101268e68960000, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-17T15:32:51,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39309-0x101268e68960000, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:32:51,860 DEBUG [M:0;7a780d55532c:39309 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-17T15:32:51,861 DEBUG [M:0;7a780d55532c:39309 {}] master.ActiveMasterManager(353): master:39309-0x101268e68960000, quorum=127.0.0.1:64124, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-17T15:32:51,861 INFO [M:0;7a780d55532c:39309 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/.lastflushedseqids 2024-11-17T15:32:51,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741880_1056 (size=228) 2024-11-17T15:32:51,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741880_1056 (size=228) 2024-11-17T15:32:51,866 INFO [M:0;7a780d55532c:39309 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-17T15:32:51,866 INFO [M:0;7a780d55532c:39309 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-17T15:32:51,866 DEBUG [M:0;7a780d55532c:39309 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T15:32:51,866 INFO [M:0;7a780d55532c:39309 {}] regionserver.HRegion(1755): Closing region 
master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:32:51,866 DEBUG [M:0;7a780d55532c:39309 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:32:51,866 DEBUG [M:0;7a780d55532c:39309 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T15:32:51,866 DEBUG [M:0;7a780d55532c:39309 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:32:51,866 INFO [M:0;7a780d55532c:39309 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.42 KB heapSize=63.35 KB 2024-11-17T15:32:51,882 DEBUG [M:0;7a780d55532c:39309 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6b9afdd44225439f9de863216c24d3a5 is 82, key is hbase:meta,,1/info:regioninfo/1731857514828/Put/seqid=0 2024-11-17T15:32:51,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741881_1057 (size=5672) 2024-11-17T15:32:51,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741881_1057 (size=5672) 2024-11-17T15:32:51,887 INFO [M:0;7a780d55532c:39309 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6b9afdd44225439f9de863216c24d3a5 2024-11-17T15:32:51,905 DEBUG [M:0;7a780d55532c:39309 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/589527db37c8445e9f1c353cd93562c2 is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731857515257/Put/seqid=0 2024-11-17T15:32:51,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741882_1058 (size=7090) 2024-11-17T15:32:51,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741882_1058 (size=7090) 2024-11-17T15:32:51,910 INFO [M:0;7a780d55532c:39309 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.81 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/589527db37c8445e9f1c353cd93562c2 2024-11-17T15:32:51,913 INFO [M:0;7a780d55532c:39309 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 589527db37c8445e9f1c353cd93562c2 2024-11-17T15:32:51,927 DEBUG [M:0;7a780d55532c:39309 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0d7fae5708854319a74d26e8b1470ad9 is 69, key is 7a780d55532c,35813,1731857514065/rs:state/1731857514300/Put/seqid=0 2024-11-17T15:32:51,932 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741883_1059 (size=5156) 2024-11-17T15:32:51,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741883_1059 (size=5156) 2024-11-17T15:32:51,932 INFO [M:0;7a780d55532c:39309 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0d7fae5708854319a74d26e8b1470ad9 2024-11-17T15:32:51,950 DEBUG [M:0;7a780d55532c:39309 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/84f0773775814f7f97d3b9e5dedd520a is 52, key is load_balancer_on/state:d/1731857514889/Put/seqid=0 2024-11-17T15:32:51,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741884_1060 (size=5056) 2024-11-17T15:32:51,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741884_1060 (size=5056) 2024-11-17T15:32:51,955 INFO [M:0;7a780d55532c:39309 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/84f0773775814f7f97d3b9e5dedd520a 2024-11-17T15:32:51,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35813-0x101268e68960001, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T15:32:51,955 INFO [RS:0;7a780d55532c:35813 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T15:32:51,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35813-0x101268e68960001, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T15:32:51,955 INFO [RS:0;7a780d55532c:35813 {}] regionserver.HRegionServer(1031): Exiting; stopping=7a780d55532c,35813,1731857514065; zookeeper connection closed. 
2024-11-17T15:32:51,955 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@56d560a8 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@56d560a8 2024-11-17T15:32:51,955 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-17T15:32:51,959 DEBUG [M:0;7a780d55532c:39309 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6b9afdd44225439f9de863216c24d3a5 as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6b9afdd44225439f9de863216c24d3a5 2024-11-17T15:32:51,963 INFO [M:0;7a780d55532c:39309 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6b9afdd44225439f9de863216c24d3a5, entries=8, sequenceid=125, filesize=5.5 K 2024-11-17T15:32:51,964 DEBUG [M:0;7a780d55532c:39309 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/589527db37c8445e9f1c353cd93562c2 as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/589527db37c8445e9f1c353cd93562c2 2024-11-17T15:32:51,967 INFO [M:0;7a780d55532c:39309 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 589527db37c8445e9f1c353cd93562c2 2024-11-17T15:32:51,968 INFO [M:0;7a780d55532c:39309 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/589527db37c8445e9f1c353cd93562c2, entries=13, sequenceid=125, filesize=6.9 K 2024-11-17T15:32:51,968 DEBUG [M:0;7a780d55532c:39309 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0d7fae5708854319a74d26e8b1470ad9 as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0d7fae5708854319a74d26e8b1470ad9 2024-11-17T15:32:51,972 INFO [M:0;7a780d55532c:39309 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0d7fae5708854319a74d26e8b1470ad9, entries=1, sequenceid=125, filesize=5.0 K 2024-11-17T15:32:51,973 DEBUG [M:0;7a780d55532c:39309 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/84f0773775814f7f97d3b9e5dedd520a as hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/84f0773775814f7f97d3b9e5dedd520a 2024-11-17T15:32:51,977 INFO [M:0;7a780d55532c:39309 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/2150bd7a-5e36-2b16-2830-fca44a59cecb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/84f0773775814f7f97d3b9e5dedd520a, entries=1, sequenceid=125, filesize=4.9 K 2024-11-17T15:32:51,977 INFO [M:0;7a780d55532c:39309 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 111ms, sequenceid=125, compaction requested=false 2024-11-17T15:32:51,979 INFO [M:0;7a780d55532c:39309 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:32:51,979 DEBUG [M:0;7a780d55532c:39309 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731857571866Disabling compacts and flushes for region at 1731857571866Disabling writes for close at 1731857571866Obtaining lock to block concurrent updates at 1731857571866Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731857571866Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52651, getHeapSize=64808, getOffHeapSize=0, getCellsCount=148 at 1731857571867 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731857571867Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731857571867Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731857571882 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731857571882Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731857571890 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731857571904 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731857571904Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731857571913 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731857571927 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731857571927Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731857571936 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731857571949 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731857571949Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3486b91: reopening flushed file at 1731857571959 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@a5d66f4: reopening flushed file at 1731857571963 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f7a81: reopening flushed file at 1731857571968 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6709d4ef: reopening flushed file at 1731857571972 (+4 ms)Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 111ms, sequenceid=125, compaction requested=false at 1731857571977 (+5 ms)Writing region close event to WAL at 1731857571979 (+2 ms)Closed at 1731857571979 2024-11-17T15:32:51,979 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:51,979 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:51,980 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:51,980 INFO [sync.3 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:51,980 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:51,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45075 is added to blk_1073741830_1006 (size=61320) 2024-11-17T15:32:51,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35763 is added to blk_1073741830_1006 (size=61320) 2024-11-17T15:32:51,982 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-17T15:32:51,982 INFO [M:0;7a780d55532c:39309 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-17T15:32:51,982 INFO [M:0;7a780d55532c:39309 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39309 2024-11-17T15:32:51,983 INFO [M:0;7a780d55532c:39309 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T15:32:52,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39309-0x101268e68960000, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T15:32:52,085 INFO [M:0;7a780d55532c:39309 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T15:32:52,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39309-0x101268e68960000, quorum=127.0.0.1:64124, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T15:32:52,087 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@24a90bdd{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:32:52,087 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@506744b5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T15:32:52,088 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T15:32:52,088 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c08daf8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T15:32:52,088 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3af484fb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/hadoop.log.dir/,STOPPED} 2024-11-17T15:32:52,089 WARN [BP-171420599-172.17.0.2-1731857513292 heartbeating to localhost/127.0.0.1:41751 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T15:32:52,089 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T15:32:52,089 WARN [BP-171420599-172.17.0.2-1731857513292 heartbeating to localhost/127.0.0.1:41751 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-171420599-172.17.0.2-1731857513292 (Datanode Uuid 3fc17354-b591-450a-b632-df2dee3612a6) service to localhost/127.0.0.1:41751 2024-11-17T15:32:52,089 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T15:32:52,090 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/cluster_fea5238e-7690-69da-f68b-7e85fb014ef5/data/data3/current/BP-171420599-172.17.0.2-1731857513292 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:32:52,090 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/cluster_fea5238e-7690-69da-f68b-7e85fb014ef5/data/data4/current/BP-171420599-172.17.0.2-1731857513292 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:32:52,090 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T15:32:52,092 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2ea3c6d5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:32:52,092 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@624c2d5a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T15:32:52,092 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T15:32:52,093 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69291528{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T15:32:52,093 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@79974a7c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/hadoop.log.dir/,STOPPED} 2024-11-17T15:32:52,094 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T15:32:52,094 WARN [BP-171420599-172.17.0.2-1731857513292 heartbeating to localhost/127.0.0.1:41751 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T15:32:52,094 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T15:32:52,094 WARN [BP-171420599-172.17.0.2-1731857513292 heartbeating to localhost/127.0.0.1:41751 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-171420599-172.17.0.2-1731857513292 (Datanode Uuid e2e49079-5c86-487f-97b6-e6ab5af79260) service to localhost/127.0.0.1:41751 2024-11-17T15:32:52,095 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/cluster_fea5238e-7690-69da-f68b-7e85fb014ef5/data/data1/current/BP-171420599-172.17.0.2-1731857513292 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:32:52,095 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/cluster_fea5238e-7690-69da-f68b-7e85fb014ef5/data/data2/current/BP-171420599-172.17.0.2-1731857513292 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T15:32:52,095 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T15:32:52,101 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4b216a06{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T15:32:52,102 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@732e2119{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T15:32:52,102 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T15:32:52,102 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30d49282{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T15:32:52,102 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d2d8db5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/hadoop.log.dir/,STOPPED} 2024-11-17T15:32:52,109 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-17T15:32:52,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-17T15:32:52,151 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=231 (was 205) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41751 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:41751 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41751 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41751 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:41751 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) 
Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:41751 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:41751 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41751 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41751 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=509 (was 483) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=24 (was 47), ProcessCount=11 (was 11), AvailableMemoryMB=3572 (was 3612) 2024-11-17T15:32:52,160 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=231, OpenFileDescriptor=509, MaxFileDescriptor=1048576, SystemLoadAverage=24, ProcessCount=11, AvailableMemoryMB=3572 2024-11-17T15:32:52,160 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-17T15:32:52,160 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/hadoop.log.dir so I do NOT create it in target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c 2024-11-17T15:32:52,160 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/843f9c9a-3e64-b471-4a0a-660356212342/hadoop.tmp.dir so I do NOT create it in target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c 2024-11-17T15:32:52,160 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/cluster_d5303d50-70c1-d4f7-916b-be5078377a84, deleteOnExit=true 2024-11-17T15:32:52,160 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-17T15:32:52,160 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/test.cache.data in system properties and HBase conf 2024-11-17T15:32:52,161 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/hadoop.tmp.dir in system properties and HBase conf 2024-11-17T15:32:52,161 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/hadoop.log.dir in system properties and HBase conf 2024-11-17T15:32:52,161 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-17T15:32:52,161 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-17T15:32:52,161 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-17T15:32:52,161 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-17T15:32:52,161 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-17T15:32:52,161 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-17T15:32:52,161 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-17T15:32:52,161 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T15:32:52,161 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-17T15:32:52,161 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-17T15:32:52,161 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T15:32:52,161 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T15:32:52,161 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-17T15:32:52,162 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/nfs.dump.dir in system properties and HBase conf 2024-11-17T15:32:52,162 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/java.io.tmpdir in system properties and HBase conf 2024-11-17T15:32:52,162 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T15:32:52,162 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-17T15:32:52,162 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-17T15:32:52,175 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T15:32:52,243 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:32:52,248 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T15:32:52,249 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T15:32:52,249 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T15:32:52,249 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T15:32:52,250 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:32:52,250 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@33db594d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/hadoop.log.dir/,AVAILABLE} 2024-11-17T15:32:52,250 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@224e134c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T15:32:52,313 INFO [regionserver/7a780d55532c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T15:32:52,363 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@695f2019{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/java.io.tmpdir/jetty-localhost-39857-hadoop-hdfs-3_4_1-tests_jar-_-any-13080611712280982905/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T15:32:52,363 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@29527032{HTTP/1.1, (http/1.1)}{localhost:39857} 2024-11-17T15:32:52,363 INFO [Time-limited test {}] server.Server(415): Started @296285ms 2024-11-17T15:32:52,376 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T15:32:52,428 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:32:52,430 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T15:32:52,431 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T15:32:52,431 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T15:32:52,431 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T15:32:52,431 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4db8ccc1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/hadoop.log.dir/,AVAILABLE} 2024-11-17T15:32:52,431 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@356603ca{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T15:32:52,545 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@241be264{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/java.io.tmpdir/jetty-localhost-45055-hadoop-hdfs-3_4_1-tests_jar-_-any-12230183982973021551/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:32:52,546 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1c0f9d08{HTTP/1.1, (http/1.1)}{localhost:45055} 2024-11-17T15:32:52,546 INFO [Time-limited test {}] server.Server(415): Started @296467ms 2024-11-17T15:32:52,547 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T15:32:52,574 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T15:32:52,576 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T15:32:52,577 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T15:32:52,577 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T15:32:52,577 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T15:32:52,577 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a42ddf9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/hadoop.log.dir/,AVAILABLE} 2024-11-17T15:32:52,577 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@dbe3904{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T15:32:52,637 WARN [Thread-2489 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/cluster_d5303d50-70c1-d4f7-916b-be5078377a84/data/data1/current/BP-1961293918-172.17.0.2-1731857572181/current, will proceed with Du for space computation calculation, 2024-11-17T15:32:52,637 WARN [Thread-2490 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/cluster_d5303d50-70c1-d4f7-916b-be5078377a84/data/data2/current/BP-1961293918-172.17.0.2-1731857572181/current, will proceed with Du for space computation calculation, 2024-11-17T15:32:52,657 WARN [Thread-2468 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T15:32:52,659 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe3d45f16c9130ec3 with lease ID 0x5f1de23604b07f92: Processing first storage report for DS-7d41e505-9171-4ce3-bba3-5295e5856ce8 from datanode DatanodeRegistration(127.0.0.1:38189, datanodeUuid=f75532c3-21ed-417b-be58-fae85f96817d, infoPort=46311, infoSecurePort=0, ipcPort=38043, storageInfo=lv=-57;cid=testClusterID;nsid=1722830454;c=1731857572181) 2024-11-17T15:32:52,659 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe3d45f16c9130ec3 with lease ID 0x5f1de23604b07f92: from storage DS-7d41e505-9171-4ce3-bba3-5295e5856ce8 node DatanodeRegistration(127.0.0.1:38189, datanodeUuid=f75532c3-21ed-417b-be58-fae85f96817d, infoPort=46311, infoSecurePort=0, ipcPort=38043, storageInfo=lv=-57;cid=testClusterID;nsid=1722830454;c=1731857572181), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:32:52,659 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe3d45f16c9130ec3 with lease ID 0x5f1de23604b07f92: Processing first storage report for DS-9f991bc3-0d21-4831-9b95-9f7c96bb44c6 from datanode DatanodeRegistration(127.0.0.1:38189, datanodeUuid=f75532c3-21ed-417b-be58-fae85f96817d, infoPort=46311, infoSecurePort=0, ipcPort=38043, storageInfo=lv=-57;cid=testClusterID;nsid=1722830454;c=1731857572181) 2024-11-17T15:32:52,659 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe3d45f16c9130ec3 with lease ID 0x5f1de23604b07f92: from storage DS-9f991bc3-0d21-4831-9b95-9f7c96bb44c6 node DatanodeRegistration(127.0.0.1:38189, datanodeUuid=f75532c3-21ed-417b-be58-fae85f96817d, infoPort=46311, infoSecurePort=0, ipcPort=38043, storageInfo=lv=-57;cid=testClusterID;nsid=1722830454;c=1731857572181), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:32:52,695 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5f055e5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/java.io.tmpdir/jetty-localhost-40095-hadoop-hdfs-3_4_1-tests_jar-_-any-9762163905634980217/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:32:52,695 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2884ece6{HTTP/1.1, (http/1.1)}{localhost:40095} 2024-11-17T15:32:52,695 INFO [Time-limited test {}] server.Server(415): Started @296617ms 2024-11-17T15:32:52,696 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
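Both warnings above are configuration fallbacks rather than hard failures: the HTTP authentication filter could not read /home/jenkins/hadoop-http-auth-signature-secret and switched to random signing secrets, and the DataNode directory-scanner throttle was outside its accepted range, so the scanner fell back to the logged default of -1 (no throttling). A minimal sketch of how these two settings could be supplied on a Hadoop Configuration before the mini DFS cluster starts; the throttle key is quoted verbatim in the warning and the secret-file key is a standard Hadoop property, while the file path and the value 500 are illustrative assumptions:

    import org.apache.hadoop.conf.Configuration;

    public class MiniDfsConfSketch {
      public static Configuration build() {
        Configuration conf = new Configuration();
        // Point the HTTP auth filter at a readable secret file so
        // FileSignerSecretProvider can initialize instead of falling back
        // to random secrets (path is an illustrative assumption).
        conf.set("hadoop.http.authentication.signature.secret.file",
            "/tmp/hadoop-http-auth-signature-secret");
        // Keep the directory-scanner throttle at or below 1000 ms/sec;
        // per the warning above, larger values make the scanner fall back
        // to its default of -1 (throttling disabled). 500 is illustrative.
        conf.setInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", 500);
        return conf;
      }
    }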
2024-11-17T15:32:52,781 WARN [Thread-2515 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/cluster_d5303d50-70c1-d4f7-916b-be5078377a84/data/data3/current/BP-1961293918-172.17.0.2-1731857572181/current, will proceed with Du for space computation calculation, 2024-11-17T15:32:52,781 WARN [Thread-2516 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/cluster_d5303d50-70c1-d4f7-916b-be5078377a84/data/data4/current/BP-1961293918-172.17.0.2-1731857572181/current, will proceed with Du for space computation calculation, 2024-11-17T15:32:52,797 WARN [Thread-2504 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-17T15:32:52,799 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xec81146f994082b6 with lease ID 0x5f1de23604b07f93: Processing first storage report for DS-3f8aeeee-fe69-4726-9c0f-cec89c8bddba from datanode DatanodeRegistration(127.0.0.1:37887, datanodeUuid=12ef5aaf-6c28-4793-8bed-b1d203941f8d, infoPort=33313, infoSecurePort=0, ipcPort=37807, storageInfo=lv=-57;cid=testClusterID;nsid=1722830454;c=1731857572181) 2024-11-17T15:32:52,799 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xec81146f994082b6 with lease ID 0x5f1de23604b07f93: from storage DS-3f8aeeee-fe69-4726-9c0f-cec89c8bddba node DatanodeRegistration(127.0.0.1:37887, datanodeUuid=12ef5aaf-6c28-4793-8bed-b1d203941f8d, infoPort=33313, infoSecurePort=0, ipcPort=37807, storageInfo=lv=-57;cid=testClusterID;nsid=1722830454;c=1731857572181), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:32:52,799 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xec81146f994082b6 with lease ID 0x5f1de23604b07f93: Processing first storage report for DS-4ea2ebb2-5c22-4bd8-933f-60be2beaf4a4 from datanode DatanodeRegistration(127.0.0.1:37887, datanodeUuid=12ef5aaf-6c28-4793-8bed-b1d203941f8d, infoPort=33313, infoSecurePort=0, ipcPort=37807, storageInfo=lv=-57;cid=testClusterID;nsid=1722830454;c=1731857572181) 2024-11-17T15:32:52,799 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xec81146f994082b6 with lease ID 0x5f1de23604b07f93: from storage DS-4ea2ebb2-5c22-4bd8-933f-60be2beaf4a4 node DatanodeRegistration(127.0.0.1:37887, datanodeUuid=12ef5aaf-6c28-4793-8bed-b1d203941f8d, infoPort=33313, infoSecurePort=0, ipcPort=37807, storageInfo=lv=-57;cid=testClusterID;nsid=1722830454;c=1731857572181), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T15:32:52,817 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:52,817 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:52,818 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c 2024-11-17T15:32:52,820 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/cluster_d5303d50-70c1-d4f7-916b-be5078377a84/zookeeper_0, clientPort=58465, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/cluster_d5303d50-70c1-d4f7-916b-be5078377a84/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/cluster_d5303d50-70c1-d4f7-916b-be5078377a84/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-17T15:32:52,821 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58465 2024-11-17T15:32:52,821 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:32:52,822 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:32:52,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37887 is added to blk_1073741825_1001 (size=7) 2024-11-17T15:32:52,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38189 is added to blk_1073741825_1001 (size=7) 2024-11-17T15:32:52,830 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18 with version=8 2024-11-17T15:32:52,830 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39521/user/jenkins/test-data/769c6738-bcb9-0289-79d6-cf6f3f2ad69d/hbase-staging 2024-11-17T15:32:52,832 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7a780d55532c:0 server-side Connection retries=45 2024-11-17T15:32:52,832 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T15:32:52,832 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T15:32:52,832 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T15:32:52,832 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T15:32:52,832 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T15:32:52,832 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-17T15:32:52,832 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T15:32:52,833 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42239 2024-11-17T15:32:52,833 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42239 connecting to ZooKeeper ensemble=127.0.0.1:58465 2024-11-17T15:32:52,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:422390x0, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T15:32:52,839 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42239-0x101268f4e630000 connected 2024-11-17T15:32:52,854 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:32:52,855 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:32:52,857 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42239-0x101268f4e630000, quorum=127.0.0.1:58465, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T15:32:52,857 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18, hbase.cluster.distributed=false 2024-11-17T15:32:52,858 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42239-0x101268f4e630000, quorum=127.0.0.1:58465, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T15:32:52,859 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42239 2024-11-17T15:32:52,859 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42239 2024-11-17T15:32:52,859 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42239 2024-11-17T15:32:52,860 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42239 2024-11-17T15:32:52,860 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42239 2024-11-17T15:32:52,874 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7a780d55532c:0 server-side Connection retries=45 2024-11-17T15:32:52,874 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T15:32:52,874 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T15:32:52,874 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T15:32:52,874 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T15:32:52,874 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T15:32:52,874 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-17T15:32:52,875 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T15:32:52,875 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40625 2024-11-17T15:32:52,876 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40625 connecting to ZooKeeper ensemble=127.0.0.1:58465 2024-11-17T15:32:52,876 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:32:52,878 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:32:52,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:406250x0, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T15:32:52,882 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:406250x0, quorum=127.0.0.1:58465, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T15:32:52,882 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40625-0x101268f4e630001 connected 2024-11-17T15:32:52,882 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-17T15:32:52,883 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-17T15:32:52,883 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40625-0x101268f4e630001, quorum=127.0.0.1:58465, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 
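The block above is the standard mini-cluster bring-up: HBaseTestingUtil points hbase.rootdir at the test data directory, starts a MiniZooKeeperCluster on an ephemeral client port (58465 here), then boots one master and one region server, each instantiating its RpcExecutor queues and binding a NettyRpcServer. A sketch of how a test typically drives this sequence; HBaseTestingUtil is the class named in the log, but the test body and the hbase.regionserver.handler.count key and value (assumed to be behind the small handlerCount figures shown) are illustrative assumptions:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        Configuration conf = util.getConfiguration();
        // Assumed knob behind the small handler pools logged above
        // (handlerCount=3); the value this particular test used is a guess.
        conf.setInt("hbase.regionserver.handler.count", 3);
        // Starts ZooKeeper, DFS, one master and one region server,
        // producing a startup sequence like the one logged here.
        util.startMiniCluster();
        try {
          // ... test body ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }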
2024-11-17T15:32:52,884 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40625-0x101268f4e630001, quorum=127.0.0.1:58465, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T15:32:52,885 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40625 2024-11-17T15:32:52,885 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40625 2024-11-17T15:32:52,885 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40625 2024-11-17T15:32:52,885 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40625 2024-11-17T15:32:52,885 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40625 2024-11-17T15:32:52,896 DEBUG [M:0;7a780d55532c:42239 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7a780d55532c:42239 2024-11-17T15:32:52,897 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7a780d55532c,42239,1731857572831 2024-11-17T15:32:52,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42239-0x101268f4e630000, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T15:32:52,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40625-0x101268f4e630001, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T15:32:52,899 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42239-0x101268f4e630000, quorum=127.0.0.1:58465, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7a780d55532c,42239,1731857572831 2024-11-17T15:32:52,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40625-0x101268f4e630001, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-17T15:32:52,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42239-0x101268f4e630000, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:32:52,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40625-0x101268f4e630001, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:32:52,901 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42239-0x101268f4e630000, quorum=127.0.0.1:58465, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-17T15:32:52,902 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7a780d55532c,42239,1731857572831 from backup master directory 2024-11-17T15:32:52,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42239-0x101268f4e630000, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7a780d55532c,42239,1731857572831 2024-11-17T15:32:52,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40625-0x101268f4e630001, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T15:32:52,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42239-0x101268f4e630000, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T15:32:52,903 WARN [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-17T15:32:52,903 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7a780d55532c,42239,1731857572831 2024-11-17T15:32:52,906 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/hbase.id] with ID: 6c563126-611c-4092-bdeb-608493e970d9 2024-11-17T15:32:52,906 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/.tmp/hbase.id 2024-11-17T15:32:52,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37887 is added to blk_1073741826_1002 (size=42) 2024-11-17T15:32:52,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38189 is added to blk_1073741826_1002 (size=42) 2024-11-17T15:32:52,912 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/.tmp/hbase.id]:[hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/hbase.id] 2024-11-17T15:32:52,921 INFO [master/7a780d55532c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:32:52,921 INFO [master/7a780d55532c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-17T15:32:52,922 INFO [master/7a780d55532c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
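The events above are the active-master election: the master registers under /hbase/backup-masters, takes /hbase/master, then deletes its backup-masters entry, and every ZKWatcher connected to the 127.0.0.1:58465 ensemble sees the corresponding NodeCreated/NodeDeleted/NodeChildrenChanged events. A minimal sketch of inspecting those znodes with a plain ZooKeeper client; the ensemble address and the /hbase base znode come from the log, everything else is illustrative:

    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;

    public class MasterZNodeSketch {
      public static void main(String[] args) throws Exception {
        // Ensemble address taken from the log lines above.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:58465", 30000, event -> { });
        // /hbase/master is the ephemeral znode held by the active master;
        // /hbase/backup-masters lists any standby masters.
        System.out.println("active master znode: " + zk.exists("/hbase/master", false));
        List<String> backups = zk.getChildren("/hbase/backup-masters", false);
        System.out.println("backup masters: " + backups);
        zk.close();
      }
    }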
2024-11-17T15:32:52,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42239-0x101268f4e630000, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:32:52,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40625-0x101268f4e630001, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:32:52,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38189 is added to blk_1073741827_1003 (size=196) 2024-11-17T15:32:52,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37887 is added to blk_1073741827_1003 (size=196) 2024-11-17T15:32:52,930 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T15:32:52,931 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-17T15:32:52,931 INFO [master/7a780d55532c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T15:32:52,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38189 is added to blk_1073741828_1004 (size=1189) 2024-11-17T15:32:52,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37887 is added to blk_1073741828_1004 (size=1189) 2024-11-17T15:32:52,939 INFO [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/MasterData/data/master/store 2024-11-17T15:32:52,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38189 is added to blk_1073741829_1005 (size=34) 2024-11-17T15:32:52,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37887 is added to blk_1073741829_1005 (size=34) 2024-11-17T15:32:52,944 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:32:52,944 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T15:32:52,944 INFO [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:32:52,944 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:32:52,944 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T15:32:52,944 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:32:52,944 INFO [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
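The descriptor dumped above for the local master:store region corresponds directly to the public descriptor-builder API. A sketch of the equivalent construction, offered only as a way to read the attribute dump, not as the code path MasterRegion actually uses; the 'proc', 'rs' and 'state' families are left at builder defaults, which match the values shown (1 version, ROW bloom filter, 64 KB blocks):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            // 'info' family as dumped above: 3 versions, ROW_INDEX_V1 encoding,
            // ROWCOL bloom filter, in-memory, 8 KB blocks.
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .build())
            // 'proc', 'rs' and 'state' use the builder defaults, which match
            // the dump: 1 version, ROW bloom filter, 64 KB blocks.
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
            .build();
      }
    }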
2024-11-17T15:32:52,944 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731857572944Disabling compacts and flushes for region at 1731857572944Disabling writes for close at 1731857572944Writing region close event to WAL at 1731857572944Closed at 1731857572944 2024-11-17T15:32:52,945 WARN [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/MasterData/data/master/store/.initializing 2024-11-17T15:32:52,945 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/MasterData/WALs/7a780d55532c,42239,1731857572831 2024-11-17T15:32:52,948 INFO [master/7a780d55532c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a780d55532c%2C42239%2C1731857572831, suffix=, logDir=hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/MasterData/WALs/7a780d55532c,42239,1731857572831, archiveDir=hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/MasterData/oldWALs, maxLogs=10 2024-11-17T15:32:52,948 INFO [master/7a780d55532c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C42239%2C1731857572831.1731857572948 2024-11-17T15:32:52,952 INFO [master/7a780d55532c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/MasterData/WALs/7a780d55532c,42239,1731857572831/7a780d55532c%2C42239%2C1731857572831.1731857572948 2024-11-17T15:32:52,956 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33313:33313),(127.0.0.1/127.0.0.1:46311:46311)] 2024-11-17T15:32:52,958 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-17T15:32:52,958 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:32:52,958 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:32:52,958 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:32:52,959 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:32:52,960 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-17T15:32:52,960 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:32:52,961 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:32:52,961 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:32:52,962 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-17T15:32:52,962 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:32:52,962 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T15:32:52,962 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:32:52,963 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-17T15:32:52,963 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:32:52,963 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T15:32:52,963 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:32:52,964 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-17T15:32:52,964 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:32:52,964 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T15:32:52,965 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:32:52,965 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:32:52,965 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:32:52,966 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:32:52,966 DEBUG [master/7a780d55532c:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:32:52,967 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-17T15:32:52,968 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T15:32:52,969 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T15:32:52,970 INFO [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=703531, jitterRate=-0.10541419684886932}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-17T15:32:52,970 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731857572958Initializing all the Stores at 1731857572959 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857572959Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857572959Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857572959Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857572959Cleaning up temporary data from old regions at 1731857572966 (+7 ms)Region opened successfully at 1731857572970 (+4 ms) 2024-11-17T15:32:52,971 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-17T15:32:52,973 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f64005b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7a780d55532c/172.17.0.2:0 2024-11-17T15:32:52,974 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-17T15:32:52,974 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-17T15:32:52,974 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-17T15:32:52,974 INFO [master/7a780d55532c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-17T15:32:52,974 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-17T15:32:52,975 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-17T15:32:52,975 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-17T15:32:52,977 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-17T15:32:52,978 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42239-0x101268f4e630000, quorum=127.0.0.1:58465, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-17T15:32:52,979 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-17T15:32:52,979 INFO [master/7a780d55532c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-17T15:32:52,980 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42239-0x101268f4e630000, quorum=127.0.0.1:58465, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-17T15:32:52,982 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-17T15:32:52,982 INFO [master/7a780d55532c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-17T15:32:52,983 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42239-0x101268f4e630000, quorum=127.0.0.1:58465, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-17T15:32:52,984 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-17T15:32:52,984 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42239-0x101268f4e630000, quorum=127.0.0.1:58465, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-17T15:32:52,986 DEBUG 
[master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-17T15:32:52,987 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42239-0x101268f4e630000, quorum=127.0.0.1:58465, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-17T15:32:52,988 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-17T15:32:52,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40625-0x101268f4e630001, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T15:32:52,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42239-0x101268f4e630000, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T15:32:52,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40625-0x101268f4e630001, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:32:52,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42239-0x101268f4e630000, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:32:52,991 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7a780d55532c,42239,1731857572831, sessionid=0x101268f4e630000, setting cluster-up flag (Was=false) 2024-11-17T15:32:52,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40625-0x101268f4e630001, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:32:52,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42239-0x101268f4e630000, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:32:53,000 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-17T15:32:53,000 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7a780d55532c,42239,1731857572831 2024-11-17T15:32:53,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42239-0x101268f4e630000, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:32:53,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40625-0x101268f4e630001, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:32:53,008 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-17T15:32:53,009 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7a780d55532c,42239,1731857572831 2024-11-17T15:32:53,010 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-17T15:32:53,011 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-17T15:32:53,012 INFO [master/7a780d55532c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-17T15:32:53,012 INFO [master/7a780d55532c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-17T15:32:53,012 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7a780d55532c,42239,1731857572831 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-17T15:32:53,013 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7a780d55532c:0, corePoolSize=5, maxPoolSize=5 2024-11-17T15:32:53,013 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7a780d55532c:0, corePoolSize=5, maxPoolSize=5 2024-11-17T15:32:53,013 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7a780d55532c:0, corePoolSize=5, maxPoolSize=5 2024-11-17T15:32:53,013 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7a780d55532c:0, corePoolSize=5, maxPoolSize=5 2024-11-17T15:32:53,013 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7a780d55532c:0, corePoolSize=10, maxPoolSize=10 2024-11-17T15:32:53,013 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:32:53,013 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7a780d55532c:0, corePoolSize=2, maxPoolSize=2 2024-11-17T15:32:53,014 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7a780d55532c:0, corePoolSize=1, 
maxPoolSize=1 2024-11-17T15:32:53,014 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731857603014 2024-11-17T15:32:53,014 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-17T15:32:53,015 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-17T15:32:53,015 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-17T15:32:53,015 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-17T15:32:53,015 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-17T15:32:53,015 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-17T15:32:53,015 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T15:32:53,015 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-17T15:32:53,015 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-17T15:32:53,015 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-17T15:32:53,015 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-17T15:32:53,015 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-17T15:32:53,016 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:32:53,016 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-17T15:32:53,016 INFO [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-17T15:32:53,016 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-17T15:32:53,016 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.large.0-1731857573016,5,FailOnTimeoutGroup] 2024-11-17T15:32:53,017 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.small.0-1731857573016,5,FailOnTimeoutGroup] 2024-11-17T15:32:53,017 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-17T15:32:53,017 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-17T15:32:53,017 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-17T15:32:53,017 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
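
[Annotation] The hbase:meta descriptor printed above enumerates its column families (info, ns, rep_barrier, table) with attributes such as VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING and BLOCKSIZE. As a hedged sketch only: a comparable descriptor could be assembled through the public client API as below; the table name "demo" and the single "info" family are illustrative, not taken from this run (hbase:meta itself is created internally by InitMetaProcedure).

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
        public static TableDescriptor build() {
            // One family mirroring the attributes logged for 'info' above:
            // 3 versions, ROWCOL bloom filter, ROW_INDEX_V1 encoding, in-memory, 8 KB blocks.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setInMemory(true)
                .setBlocksize(8192)
                .build();
            // 'demo' is a hypothetical table name used only for this sketch.
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
                .setColumnFamily(info)
                .build();
        }
    }
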
2024-11-17T15:32:53,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37887 is added to blk_1073741831_1007 (size=1321) 2024-11-17T15:32:53,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38189 is added to blk_1073741831_1007 (size=1321) 2024-11-17T15:32:53,029 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-17T15:32:53,029 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18 2024-11-17T15:32:53,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38189 is added to blk_1073741832_1008 (size=32) 2024-11-17T15:32:53,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37887 is added to blk_1073741832_1008 (size=32) 2024-11-17T15:32:53,036 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:32:53,040 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T15:32:53,042 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T15:32:53,042 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:32:53,042 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:32:53,042 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T15:32:53,043 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T15:32:53,043 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:32:53,044 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:32:53,044 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T15:32:53,044 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T15:32:53,044 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:32:53,045 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:32:53,045 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T15:32:53,046 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T15:32:53,046 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:32:53,046 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:32:53,046 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T15:32:53,047 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/data/hbase/meta/1588230740 2024-11-17T15:32:53,047 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/data/hbase/meta/1588230740 2024-11-17T15:32:53,048 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T15:32:53,048 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T15:32:53,048 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
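
[Annotation] The FlushLargeStoresPolicy record above falls back to region.getMemStoreFlushHeapSize divided by the family count because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor. A minimal sketch of supplying that key on a descriptor; the key name is quoted from the log record itself, while the 16 MB value and the helper method are illustrative assumptions.

    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushBoundSketch {
        // Returns a copy of the given descriptor with the per-family flush lower bound set.
        public static TableDescriptor withFlushLowerBound(TableDescriptor base) {
            return TableDescriptorBuilder.newBuilder(base)
                .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                          String.valueOf(16L * 1024 * 1024)) // 16 MB, example value only
                .build();
        }
    }
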
2024-11-17T15:32:53,049 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T15:32:53,051 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T15:32:53,051 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=691151, jitterRate=-0.12115618586540222}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T15:32:53,052 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731857573036Initializing all the Stores at 1731857573037 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857573037Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857573040 (+3 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857573040Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857573040Cleaning up temporary data from old regions at 1731857573048 (+8 ms)Region opened successfully at 1731857573052 (+4 ms) 2024-11-17T15:32:53,052 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T15:32:53,052 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T15:32:53,052 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T15:32:53,052 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T15:32:53,052 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T15:32:53,052 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T15:32:53,052 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731857573052Disabling compacts and flushes for region at 1731857573052Disabling writes for close at 1731857573052Writing 
region close event to WAL at 1731857573052Closed at 1731857573052 2024-11-17T15:32:53,053 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T15:32:53,053 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-17T15:32:53,053 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-17T15:32:53,054 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T15:32:53,055 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-17T15:32:53,087 INFO [RS:0;7a780d55532c:40625 {}] regionserver.HRegionServer(746): ClusterId : 6c563126-611c-4092-bdeb-608493e970d9 2024-11-17T15:32:53,087 DEBUG [RS:0;7a780d55532c:40625 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-17T15:32:53,089 DEBUG [RS:0;7a780d55532c:40625 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-17T15:32:53,089 DEBUG [RS:0;7a780d55532c:40625 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-17T15:32:53,092 DEBUG [RS:0;7a780d55532c:40625 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-17T15:32:53,092 DEBUG [RS:0;7a780d55532c:40625 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47b594f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7a780d55532c/172.17.0.2:0 2024-11-17T15:32:53,104 DEBUG [RS:0;7a780d55532c:40625 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7a780d55532c:40625 2024-11-17T15:32:53,104 INFO [RS:0;7a780d55532c:40625 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-17T15:32:53,104 INFO [RS:0;7a780d55532c:40625 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-17T15:32:53,104 DEBUG [RS:0;7a780d55532c:40625 {}] regionserver.HRegionServer(832): About to register with Master. 
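
[Annotation] The ZKWatcher and ZKUtil records in this section (NodeCreated, NodeChildrenChanged, NodeDataChanged events, watchers set on znodes such as /hbase/rs/...) are HBase's wrapper around standard ZooKeeper watch notifications. A minimal sketch using the plain ZooKeeper client API rather than HBase's internal ZKWatcher; the connect string 127.0.0.1:58465 is the quorum printed above, the session timeout and the printed format are illustrative.

    import java.io.IOException;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ZkWatchSketch {
        public static ZooKeeper connect() throws IOException {
            // 30s session timeout is an arbitrary example value.
            return new ZooKeeper("127.0.0.1:58465", 30_000, new Watcher() {
                @Override
                public void process(WatchedEvent event) {
                    // Mirrors the fields ZKWatcher logs above: event type, state and path.
                    System.out.println("type=" + event.getType()
                        + ", state=" + event.getState()
                        + ", path=" + event.getPath());
                }
            });
        }
    }
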
2024-11-17T15:32:53,105 INFO [RS:0;7a780d55532c:40625 {}] regionserver.HRegionServer(2659): reportForDuty to master=7a780d55532c,42239,1731857572831 with port=40625, startcode=1731857572874 2024-11-17T15:32:53,105 DEBUG [RS:0;7a780d55532c:40625 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-17T15:32:53,107 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35299, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-17T15:32:53,107 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42239 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7a780d55532c,40625,1731857572874 2024-11-17T15:32:53,107 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42239 {}] master.ServerManager(517): Registering regionserver=7a780d55532c,40625,1731857572874 2024-11-17T15:32:53,109 DEBUG [RS:0;7a780d55532c:40625 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18 2024-11-17T15:32:53,109 DEBUG [RS:0;7a780d55532c:40625 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45087 2024-11-17T15:32:53,109 DEBUG [RS:0;7a780d55532c:40625 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-17T15:32:53,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42239-0x101268f4e630000, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T15:32:53,110 DEBUG [RS:0;7a780d55532c:40625 {}] zookeeper.ZKUtil(111): regionserver:40625-0x101268f4e630001, quorum=127.0.0.1:58465, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7a780d55532c,40625,1731857572874 2024-11-17T15:32:53,110 WARN [RS:0;7a780d55532c:40625 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-17T15:32:53,110 INFO [RS:0;7a780d55532c:40625 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T15:32:53,111 DEBUG [RS:0;7a780d55532c:40625 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/WALs/7a780d55532c,40625,1731857572874 2024-11-17T15:32:53,111 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7a780d55532c,40625,1731857572874] 2024-11-17T15:32:53,113 INFO [RS:0;7a780d55532c:40625 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-17T15:32:53,115 INFO [RS:0;7a780d55532c:40625 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-17T15:32:53,115 INFO [RS:0;7a780d55532c:40625 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-17T15:32:53,115 INFO [RS:0;7a780d55532c:40625 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
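
[Annotation] The MemStoreFlusher record above derives globalMemStoreLimit=880 M and the 836 M low-water mark from heap-fraction settings. A hedged sketch of the relevant site configuration; the key names are recalled from hbase-default.xml and should be treated as assumptions to verify against this 3.0.0-beta-2 build, and the values shown are the usual defaults rather than the ones used in this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
        public static Configuration tuneMemstore() {
            Configuration conf = HBaseConfiguration.create();
            // Assumed keys: fraction of heap shared by all memstores, and the low-water
            // mark as a fraction of that limit (836 M is roughly 95% of 880 M above).
            conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
            conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
            return conf;
        }
    }
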
2024-11-17T15:32:53,116 INFO [RS:0;7a780d55532c:40625 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-17T15:32:53,117 INFO [RS:0;7a780d55532c:40625 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-17T15:32:53,117 INFO [RS:0;7a780d55532c:40625 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-17T15:32:53,117 DEBUG [RS:0;7a780d55532c:40625 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:32:53,117 DEBUG [RS:0;7a780d55532c:40625 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:32:53,117 DEBUG [RS:0;7a780d55532c:40625 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:32:53,117 DEBUG [RS:0;7a780d55532c:40625 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:32:53,117 DEBUG [RS:0;7a780d55532c:40625 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:32:53,117 DEBUG [RS:0;7a780d55532c:40625 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7a780d55532c:0, corePoolSize=2, maxPoolSize=2 2024-11-17T15:32:53,117 DEBUG [RS:0;7a780d55532c:40625 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:32:53,117 DEBUG [RS:0;7a780d55532c:40625 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:32:53,117 DEBUG [RS:0;7a780d55532c:40625 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:32:53,117 DEBUG [RS:0;7a780d55532c:40625 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:32:53,117 DEBUG [RS:0;7a780d55532c:40625 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:32:53,117 DEBUG [RS:0;7a780d55532c:40625 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7a780d55532c:0, corePoolSize=1, maxPoolSize=1 2024-11-17T15:32:53,117 DEBUG [RS:0;7a780d55532c:40625 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7a780d55532c:0, corePoolSize=3, maxPoolSize=3 2024-11-17T15:32:53,117 DEBUG [RS:0;7a780d55532c:40625 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7a780d55532c:0, corePoolSize=3, maxPoolSize=3 2024-11-17T15:32:53,117 INFO [RS:0;7a780d55532c:40625 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
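
[Annotation] Many records above and below enable ScheduledChore instances (CompactionChecker, MemstoreFlusherChore, CompactedHFilesCleaner, ...) on a ChoreService with a period in milliseconds. A rough sketch of that pattern, assuming the ScheduledChore(name, stopper, period) constructor and ChoreService.scheduleChore are available as in current HBase; the chore body and names are invented for illustration.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
        public static void main(String[] args) throws Exception {
            Stoppable stopper = new Stoppable() {
                private volatile boolean stopped;
                @Override public void stop(String why) { stopped = true; }
                @Override public boolean isStopped() { return stopped; }
            };
            // period=1000, unit=MILLISECONDS, like the CompactionChecker chore above.
            ScheduledChore chore = new ScheduledChore("demo-chore", stopper, 1000) {
                @Override protected void chore() {
                    // Hypothetical periodic work; real chores flush memstores, check compactions, etc.
                    System.out.println("chore tick");
                }
            };
            ChoreService service = new ChoreService("demo");
            service.scheduleChore(chore);
            Thread.sleep(3000);
            service.shutdown();
        }
    }
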
2024-11-17T15:32:53,117 INFO [RS:0;7a780d55532c:40625 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-17T15:32:53,117 INFO [RS:0;7a780d55532c:40625 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T15:32:53,117 INFO [RS:0;7a780d55532c:40625 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-17T15:32:53,117 INFO [RS:0;7a780d55532c:40625 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-17T15:32:53,117 INFO [RS:0;7a780d55532c:40625 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,40625,1731857572874-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T15:32:53,131 INFO [RS:0;7a780d55532c:40625 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-17T15:32:53,131 INFO [RS:0;7a780d55532c:40625 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,40625,1731857572874-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T15:32:53,131 INFO [RS:0;7a780d55532c:40625 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:32:53,131 INFO [RS:0;7a780d55532c:40625 {}] regionserver.Replication(171): 7a780d55532c,40625,1731857572874 started 2024-11-17T15:32:53,144 INFO [RS:0;7a780d55532c:40625 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:32:53,144 INFO [RS:0;7a780d55532c:40625 {}] regionserver.HRegionServer(1482): Serving as 7a780d55532c,40625,1731857572874, RpcServer on 7a780d55532c/172.17.0.2:40625, sessionid=0x101268f4e630001 2024-11-17T15:32:53,145 DEBUG [RS:0;7a780d55532c:40625 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-17T15:32:53,145 DEBUG [RS:0;7a780d55532c:40625 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7a780d55532c,40625,1731857572874 2024-11-17T15:32:53,145 DEBUG [RS:0;7a780d55532c:40625 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a780d55532c,40625,1731857572874' 2024-11-17T15:32:53,145 DEBUG [RS:0;7a780d55532c:40625 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-17T15:32:53,145 DEBUG [RS:0;7a780d55532c:40625 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-17T15:32:53,145 DEBUG [RS:0;7a780d55532c:40625 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-17T15:32:53,145 DEBUG [RS:0;7a780d55532c:40625 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-17T15:32:53,145 DEBUG [RS:0;7a780d55532c:40625 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7a780d55532c,40625,1731857572874 2024-11-17T15:32:53,145 DEBUG [RS:0;7a780d55532c:40625 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a780d55532c,40625,1731857572874' 2024-11-17T15:32:53,145 DEBUG [RS:0;7a780d55532c:40625 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-17T15:32:53,146 DEBUG 
[RS:0;7a780d55532c:40625 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-17T15:32:53,146 DEBUG [RS:0;7a780d55532c:40625 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-17T15:32:53,146 INFO [RS:0;7a780d55532c:40625 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-17T15:32:53,146 INFO [RS:0;7a780d55532c:40625 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-17T15:32:53,205 WARN [7a780d55532c:42239 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-17T15:32:53,248 INFO [RS:0;7a780d55532c:40625 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a780d55532c%2C40625%2C1731857572874, suffix=, logDir=hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/WALs/7a780d55532c,40625,1731857572874, archiveDir=hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/oldWALs, maxLogs=32 2024-11-17T15:32:53,248 INFO [RS:0;7a780d55532c:40625 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C40625%2C1731857572874.1731857573248 2024-11-17T15:32:53,253 INFO [RS:0;7a780d55532c:40625 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/WALs/7a780d55532c,40625,1731857572874/7a780d55532c%2C40625%2C1731857572874.1731857573248 2024-11-17T15:32:53,258 DEBUG [RS:0;7a780d55532c:40625 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46311:46311),(127.0.0.1/127.0.0.1:33313:33313)] 2024-11-17T15:32:53,456 DEBUG [7a780d55532c:42239 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-17T15:32:53,456 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7a780d55532c,40625,1731857572874 2024-11-17T15:32:53,457 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7a780d55532c,40625,1731857572874, state=OPENING 2024-11-17T15:32:53,459 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-17T15:32:53,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42239-0x101268f4e630000, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:32:53,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40625-0x101268f4e630001, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:32:53,461 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T15:32:53,461 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T15:32:53,461 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T15:32:53,461 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7a780d55532c,40625,1731857572874}] 2024-11-17T15:32:53,613 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-17T15:32:53,615 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43629, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-17T15:32:53,618 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-17T15:32:53,618 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T15:32:53,619 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a780d55532c%2C40625%2C1731857572874.meta, suffix=.meta, logDir=hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/WALs/7a780d55532c,40625,1731857572874, archiveDir=hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/oldWALs, maxLogs=32 2024-11-17T15:32:53,619 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a780d55532c%2C40625%2C1731857572874.meta.1731857573619.meta 2024-11-17T15:32:53,624 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/WALs/7a780d55532c,40625,1731857572874/7a780d55532c%2C40625%2C1731857572874.meta.1731857573619.meta 2024-11-17T15:32:53,632 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33313:33313),(127.0.0.1/127.0.0.1:46311:46311)] 2024-11-17T15:32:53,637 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-17T15:32:53,637 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-17T15:32:53,637 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-17T15:32:53,637 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
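
[Annotation] The "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" records reflect a few site-configuration knobs for the FSHLog provider. A hedged sketch only: the key names below are quoted from memory of hbase-default.xml and should be treated as assumptions for this build; the values mirror what the log prints.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfSketch {
        public static Configuration tuneWal() {
            Configuration conf = HBaseConfiguration.create();
            // Assumed key names (verify against hbase-default.xml):
            conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // WAL block size
            conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // rollsize = blocksize * multiplier
            conf.setInt("hbase.regionserver.maxlogs", 32);                         // matches maxLogs=32 above
            return conf;
        }
    }
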
2024-11-17T15:32:53,637 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-17T15:32:53,637 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T15:32:53,637 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-17T15:32:53,637 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-17T15:32:53,641 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T15:32:53,641 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T15:32:53,641 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:32:53,642 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:32:53,642 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T15:32:53,642 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T15:32:53,642 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:32:53,643 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:32:53,643 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T15:32:53,643 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T15:32:53,643 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:32:53,644 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T15:32:53,644 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T15:32:53,644 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T15:32:53,644 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T15:32:53,644 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-17T15:32:53,644 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T15:32:53,645 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/data/hbase/meta/1588230740 2024-11-17T15:32:53,646 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/data/hbase/meta/1588230740 2024-11-17T15:32:53,647 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T15:32:53,647 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T15:32:53,647 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-17T15:32:53,648 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T15:32:53,649 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=832221, jitterRate=0.05822417140007019}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T15:32:53,649 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-17T15:32:53,649 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731857573637Writing region info on filesystem at 1731857573637Initializing all the Stores at 1731857573638 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857573638Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857573641 (+3 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731857573641Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731857573641Cleaning up temporary data from old regions at 1731857573647 (+6 ms)Running coprocessor post-open hooks at 1731857573649 (+2 ms)Region opened successfully at 1731857573649 2024-11-17T15:32:53,650 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731857573613 2024-11-17T15:32:53,652 DEBUG [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-17T15:32:53,652 INFO [RS_OPEN_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-17T15:32:53,652 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7a780d55532c,40625,1731857572874 2024-11-17T15:32:53,653 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7a780d55532c,40625,1731857572874, state=OPEN 2024-11-17T15:32:53,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42239-0x101268f4e630000, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T15:32:53,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40625-0x101268f4e630001, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T15:32:53,658 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7a780d55532c,40625,1731857572874 2024-11-17T15:32:53,658 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T15:32:53,658 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T15:32:53,661 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-17T15:32:53,661 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7a780d55532c,40625,1731857572874 in 197 msec 2024-11-17T15:32:53,662 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-17T15:32:53,662 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 608 msec 2024-11-17T15:32:53,663 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T15:32:53,663 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-17T15:32:53,664 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T15:32:53,664 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7a780d55532c,40625,1731857572874, seqNum=-1] 2024-11-17T15:32:53,665 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T15:32:53,666 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58837, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T15:32:53,670 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 658 msec 2024-11-17T15:32:53,670 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731857573670, completionTime=-1 2024-11-17T15:32:53,670 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-17T15:32:53,670 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-17T15:32:53,672 INFO [master/7a780d55532c:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-17T15:32:53,672 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731857633672 2024-11-17T15:32:53,672 INFO [master/7a780d55532c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731857693672 2024-11-17T15:32:53,672 INFO [master/7a780d55532c:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-17T15:32:53,672 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,42239,1731857572831-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T15:32:53,672 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,42239,1731857572831-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:32:53,672 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,42239,1731857572831-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T15:32:53,672 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7a780d55532c:42239, period=300000, unit=MILLISECONDS is enabled. 
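
[Annotation] The InitMetaProcedure record above creates the built-in 'default' and 'hbase' namespaces as part of meta initialization. For comparison, a user-level namespace can be created through the Admin API; a minimal sketch, where the namespace name 'demo_ns' is hypothetical and the built-in namespaces are, as logged, created by the procedure itself rather than by a client.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class NamespaceSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // 'demo_ns' is an example namespace name, not one used in this run.
                admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
            }
        }
    }
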
2024-11-17T15:32:53,672 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-17T15:32:53,673 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-17T15:32:53,673 DEBUG [master/7a780d55532c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-17T15:32:53,675 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.772sec 2024-11-17T15:32:53,676 INFO [master/7a780d55532c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-17T15:32:53,676 INFO [master/7a780d55532c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-17T15:32:53,676 INFO [master/7a780d55532c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-17T15:32:53,676 INFO [master/7a780d55532c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-17T15:32:53,676 INFO [master/7a780d55532c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-17T15:32:53,676 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,42239,1731857572831-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T15:32:53,676 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,42239,1731857572831-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-17T15:32:53,677 DEBUG [master/7a780d55532c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-17T15:32:53,677 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-17T15:32:53,677 INFO [master/7a780d55532c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a780d55532c,42239,1731857572831-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
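[Editorial sketch] The ChoreService records above show the active master enabling its periodic background chores (BalancerChore, CatalogJanitor, HbckChore, MobFileCleanerChore, and so on), each with a name, period, and time unit. A minimal sketch of that scheduling pattern follows, assuming HBase's internal ChoreService/ScheduledChore classes behave as these records suggest; the chore name, the 60000 ms period, and the Stoppable stub are illustrative.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) {
    // Minimal Stoppable so the chore can be cancelled, mirroring how these chores are stopped later in the log.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };

    // One scheduler thread pool per component; the prefix appears in its thread names.
    ChoreService choreService = new ChoreService("sketch");

    // Period here is intended as milliseconds, matching the "period=60000, unit=MILLISECONDS" records above.
    ScheduledChore exampleChore = new ScheduledChore("ExampleChore", stopper, 60000) {
      @Override
      protected void chore() {
        // periodic work would run here
      }
    };

    // Scheduling a chore is what emits the "Chore ScheduledChore name=... is enabled." lines seen above.
    choreService.scheduleChore(exampleChore);

    choreService.shutdown(); // stop the scheduler when the owning component stops
  }
}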
2024-11-17T15:32:53,687 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7031f48, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T15:32:53,687 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7a780d55532c,42239,-1 for getting cluster id 2024-11-17T15:32:53,687 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-17T15:32:53,688 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6c563126-611c-4092-bdeb-608493e970d9' 2024-11-17T15:32:53,689 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-17T15:32:53,689 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6c563126-611c-4092-bdeb-608493e970d9" 2024-11-17T15:32:53,689 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@222ad495, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T15:32:53,689 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7a780d55532c,42239,-1] 2024-11-17T15:32:53,689 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-17T15:32:53,689 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:32:53,690 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53136, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-17T15:32:53,691 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f794eda, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T15:32:53,691 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T15:32:53,691 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7a780d55532c,40625,1731857572874, seqNum=-1] 2024-11-17T15:32:53,692 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T15:32:53,692 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40150, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T15:32:53,694 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7a780d55532c,42239,1731857572831 2024-11-17T15:32:53,694 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T15:32:53,696 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-17T15:32:53,696 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T15:32:53,698 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/WALs/test.com,8080,1, archiveDir=hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/oldWALs, maxLogs=32 2024-11-17T15:32:53,698 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731857573698 2024-11-17T15:32:53,702 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/WALs/test.com,8080,1/test.com%2C8080%2C1.1731857573698 2024-11-17T15:32:53,703 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46311:46311),(127.0.0.1/127.0.0.1:33313:33313)] 2024-11-17T15:32:53,704 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731857573703 2024-11-17T15:32:53,707 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:53,707 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:53,707 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:53,707 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:53,708 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:53,708 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/WALs/test.com,8080,1/test.com%2C8080%2C1.1731857573698 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/WALs/test.com,8080,1/test.com%2C8080%2C1.1731857573703 2024-11-17T15:32:53,708 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46311:46311),(127.0.0.1/127.0.0.1:33313:33313)] 2024-11-17T15:32:53,708 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/WALs/test.com,8080,1/test.com%2C8080%2C1.1731857573698 is not closed yet, will try archiving it next time 2024-11-17T15:32:53,709 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:53,709 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:53,709 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:53,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37887 is added to blk_1073741835_1011 (size=93) 2024-11-17T15:32:53,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38189 is added to blk_1073741835_1011 (size=93) 2024-11-17T15:32:53,710 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/WALs/test.com,8080,1/test.com%2C8080%2C1.1731857573698 to 
hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/oldWALs/test.com%2C8080%2C1.1731857573698 2024-11-17T15:32:53,712 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:53,712 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:53,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37887 is added to blk_1073741836_1012 (size=93) 2024-11-17T15:32:53,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38189 is added to blk_1073741836_1012 (size=93) 2024-11-17T15:32:53,715 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/oldWALs 2024-11-17T15:32:53,715 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731857573703) 2024-11-17T15:32:53,715 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-17T15:32:53,715 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-17T15:32:53,715 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T15:32:53,715 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:32:53,716 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:32:53,716 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-17T15:32:53,716 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-17T15:32:53,716 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1247014602, stopped=false 2024-11-17T15:32:53,716 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7a780d55532c,42239,1731857572831 2024-11-17T15:32:53,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42239-0x101268f4e630000, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T15:32:53,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40625-0x101268f4e630001, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T15:32:53,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42239-0x101268f4e630000, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:32:53,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40625-0x101268f4e630001, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:32:53,717 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T15:32:53,717 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
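[Editorial sketch] The call stack above ends in AbstractTestLogRolling.tearDown invoking HBaseTestingUtil.shutdownMiniCluster, which drives the "Shutting down minicluster" sequence that follows. For orientation, here is a minimal JUnit 4 sketch of that minicluster lifecycle, assuming the HBaseTestingUtil start/shutdown API referenced in these records; the class name and empty test body are illustrative, not the actual AbstractTestLogRolling code.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  // Utility behind the "Minicluster is up" / "Minicluster is down" records in this log.
  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Brings up ZooKeeper, a mini-DFS, one HMaster and one RegionServer, as in the startup records earlier.
    util.startMiniCluster();
  }

  @Test
  public void testSomething() throws Exception {
    // A test such as testLogRollOnNothingWritten would exercise WAL rolling here.
  }

  @After
  public void tearDown() throws Exception {
    // Produces the connection close, region close/flush, WAL archival, and datanode shutdown records below.
    util.shutdownMiniCluster();
  }
}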
2024-11-17T15:32:53,717 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T15:32:53,718 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:32:53,718 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:42239-0x101268f4e630000, quorum=127.0.0.1:58465, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T15:32:53,718 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7a780d55532c,40625,1731857572874' ***** 2024-11-17T15:32:53,718 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40625-0x101268f4e630001, quorum=127.0.0.1:58465, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T15:32:53,718 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-17T15:32:53,718 INFO [RS:0;7a780d55532c:40625 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-17T15:32:53,718 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-17T15:32:53,718 INFO [RS:0;7a780d55532c:40625 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-17T15:32:53,718 INFO [RS:0;7a780d55532c:40625 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-17T15:32:53,718 INFO [RS:0;7a780d55532c:40625 {}] regionserver.HRegionServer(959): stopping server 7a780d55532c,40625,1731857572874 2024-11-17T15:32:53,718 INFO [RS:0;7a780d55532c:40625 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T15:32:53,718 INFO [RS:0;7a780d55532c:40625 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7a780d55532c:40625. 2024-11-17T15:32:53,718 DEBUG [RS:0;7a780d55532c:40625 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T15:32:53,718 DEBUG [RS:0;7a780d55532c:40625 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:32:53,718 INFO [RS:0;7a780d55532c:40625 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-17T15:32:53,718 INFO [RS:0;7a780d55532c:40625 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-17T15:32:53,718 INFO [RS:0;7a780d55532c:40625 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-17T15:32:53,718 INFO [RS:0;7a780d55532c:40625 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-17T15:32:53,719 INFO [RS:0;7a780d55532c:40625 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-17T15:32:53,719 DEBUG [RS:0;7a780d55532c:40625 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-17T15:32:53,719 DEBUG [RS:0;7a780d55532c:40625 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-17T15:32:53,719 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T15:32:53,719 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T15:32:53,719 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T15:32:53,719 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T15:32:53,719 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T15:32:53,719 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-17T15:32:53,734 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/data/hbase/meta/1588230740/.tmp/ns/86fa20c73c014d85af44510cebcc047e is 43, key is default/ns:d/1731857573666/Put/seqid=0 2024-11-17T15:32:53,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37887 is added to blk_1073741837_1013 (size=5153) 2024-11-17T15:32:53,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38189 is added to blk_1073741837_1013 (size=5153) 2024-11-17T15:32:53,739 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/data/hbase/meta/1588230740/.tmp/ns/86fa20c73c014d85af44510cebcc047e 2024-11-17T15:32:53,743 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/data/hbase/meta/1588230740/.tmp/ns/86fa20c73c014d85af44510cebcc047e as hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/data/hbase/meta/1588230740/ns/86fa20c73c014d85af44510cebcc047e 2024-11-17T15:32:53,747 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/data/hbase/meta/1588230740/ns/86fa20c73c014d85af44510cebcc047e, entries=2, sequenceid=6, filesize=5.0 K 2024-11-17T15:32:53,748 INFO 
[RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 29ms, sequenceid=6, compaction requested=false 2024-11-17T15:32:53,751 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-17T15:32:53,752 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T15:32:53,752 INFO [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T15:32:53,752 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731857573719Running coprocessor pre-close hooks at 1731857573719Disabling compacts and flushes for region at 1731857573719Disabling writes for close at 1731857573719Obtaining lock to block concurrent updates at 1731857573719Preparing flush snapshotting stores in 1588230740 at 1731857573719Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731857573719Flushing stores of hbase:meta,,1.1588230740 at 1731857573720 (+1 ms)Flushing 1588230740/ns: creating writer at 1731857573720Flushing 1588230740/ns: appending metadata at 1731857573734 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731857573734Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7150e312: reopening flushed file at 1731857573743 (+9 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 29ms, sequenceid=6, compaction requested=false at 1731857573748 (+5 ms)Writing region close event to WAL at 1731857573749 (+1 ms)Running coprocessor post-close hooks at 1731857573752 (+3 ms)Closed at 1731857573752 2024-11-17T15:32:53,752 DEBUG [RS_CLOSE_META-regionserver/7a780d55532c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-17T15:32:53,818 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,35049,1731857381690/7a780d55532c%2C35049%2C1731857381690.1731857381925 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:53,818 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42915/user/jenkins/test-data/e5b6be20-cfce-af90-796f-b6abef09b1ac/WALs/7a780d55532c,38057,1731857380513/7a780d55532c%2C38057%2C1731857380513.meta.1731857381524.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T15:32:53,919 INFO [RS:0;7a780d55532c:40625 {}] regionserver.HRegionServer(976): stopping server 7a780d55532c,40625,1731857572874; all regions closed. 2024-11-17T15:32:53,919 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:53,919 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:53,920 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:53,920 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:53,920 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:53,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38189 is added to blk_1073741834_1010 (size=1152) 2024-11-17T15:32:53,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37887 is added to blk_1073741834_1010 (size=1152) 2024-11-17T15:32:53,924 DEBUG [RS:0;7a780d55532c:40625 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/oldWALs 2024-11-17T15:32:53,924 INFO [RS:0;7a780d55532c:40625 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7a780d55532c%2C40625%2C1731857572874.meta:.meta(num 1731857573619) 2024-11-17T15:32:53,924 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:53,924 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:53,924 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:53,924 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:53,925 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:53,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37887 is added to blk_1073741833_1009 (size=93) 2024-11-17T15:32:53,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38189 is added to blk_1073741833_1009 (size=93) 2024-11-17T15:32:53,928 DEBUG [RS:0;7a780d55532c:40625 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/oldWALs 2024-11-17T15:32:53,928 INFO [RS:0;7a780d55532c:40625 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7a780d55532c%2C40625%2C1731857572874:(num 1731857573248) 2024-11-17T15:32:53,928 DEBUG [RS:0;7a780d55532c:40625 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T15:32:53,928 INFO [RS:0;7a780d55532c:40625 {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T15:32:53,928 INFO [RS:0;7a780d55532c:40625 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T15:32:53,928 INFO [RS:0;7a780d55532c:40625 {}] hbase.ChoreService(370): Chore service for: regionserver/7a780d55532c:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, 
unit=MILLISECONDS] on shutdown 2024-11-17T15:32:53,928 INFO [RS:0;7a780d55532c:40625 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T15:32:53,928 INFO [regionserver/7a780d55532c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-17T15:32:53,928 INFO [RS:0;7a780d55532c:40625 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40625 2024-11-17T15:32:53,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40625-0x101268f4e630001, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7a780d55532c,40625,1731857572874 2024-11-17T15:32:53,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42239-0x101268f4e630000, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T15:32:53,930 INFO [RS:0;7a780d55532c:40625 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T15:32:53,931 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7a780d55532c,40625,1731857572874] 2024-11-17T15:32:53,933 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7a780d55532c,40625,1731857572874 already deleted, retry=false 2024-11-17T15:32:53,934 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7a780d55532c,40625,1731857572874 expired; onlineServers=0 2024-11-17T15:32:53,934 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7a780d55532c,42239,1731857572831' ***** 2024-11-17T15:32:53,934 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-17T15:32:53,934 INFO [M:0;7a780d55532c:42239 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T15:32:53,934 INFO [M:0;7a780d55532c:42239 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T15:32:53,934 DEBUG [M:0;7a780d55532c:42239 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-17T15:32:53,934 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-17T15:32:53,934 DEBUG [M:0;7a780d55532c:42239 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-17T15:32:53,934 DEBUG [master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.large.0-1731857573016 {}] cleaner.HFileCleaner(306): Exit Thread[master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.large.0-1731857573016,5,FailOnTimeoutGroup] 2024-11-17T15:32:53,934 DEBUG [master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.small.0-1731857573016 {}] cleaner.HFileCleaner(306): Exit Thread[master/7a780d55532c:0:becomeActiveMaster-HFileCleaner.small.0-1731857573016,5,FailOnTimeoutGroup] 2024-11-17T15:32:53,934 INFO [M:0;7a780d55532c:42239 {}] hbase.ChoreService(370): Chore service for: master/7a780d55532c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-17T15:32:53,934 INFO [M:0;7a780d55532c:42239 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T15:32:53,934 DEBUG [M:0;7a780d55532c:42239 {}] master.HMaster(1795): Stopping service threads 2024-11-17T15:32:53,934 INFO [M:0;7a780d55532c:42239 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-17T15:32:53,934 INFO [M:0;7a780d55532c:42239 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T15:32:53,934 INFO [M:0;7a780d55532c:42239 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-17T15:32:53,934 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-17T15:32:53,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42239-0x101268f4e630000, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-17T15:32:53,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42239-0x101268f4e630000, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T15:32:53,935 DEBUG [M:0;7a780d55532c:42239 {}] zookeeper.ZKUtil(347): master:42239-0x101268f4e630000, quorum=127.0.0.1:58465, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-17T15:32:53,935 WARN [M:0;7a780d55532c:42239 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-17T15:32:53,936 INFO [M:0;7a780d55532c:42239 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/.lastflushedseqids 2024-11-17T15:32:53,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38189 is added to blk_1073741838_1014 (size=99) 2024-11-17T15:32:53,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37887 is added to blk_1073741838_1014 (size=99) 2024-11-17T15:32:53,941 INFO [M:0;7a780d55532c:42239 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-17T15:32:53,941 INFO [M:0;7a780d55532c:42239 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-17T15:32:53,941 DEBUG [M:0;7a780d55532c:42239 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T15:32:53,941 INFO [M:0;7a780d55532c:42239 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:32:53,941 DEBUG [M:0;7a780d55532c:42239 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:32:53,941 DEBUG [M:0;7a780d55532c:42239 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T15:32:53,941 DEBUG [M:0;7a780d55532c:42239 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:32:53,941 INFO [M:0;7a780d55532c:42239 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-17T15:32:53,956 DEBUG [M:0;7a780d55532c:42239 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5a4958bf952844e3ba625f6a906349b1 is 82, key is hbase:meta,,1/info:regioninfo/1731857573652/Put/seqid=0 2024-11-17T15:32:53,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38189 is added to blk_1073741839_1015 (size=5672) 2024-11-17T15:32:53,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37887 is added to blk_1073741839_1015 (size=5672) 2024-11-17T15:32:53,961 INFO [M:0;7a780d55532c:42239 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5a4958bf952844e3ba625f6a906349b1 2024-11-17T15:32:53,978 DEBUG [M:0;7a780d55532c:42239 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bb55fc111a214a13ae51d406c126fc09 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731857573669/Put/seqid=0 2024-11-17T15:32:53,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37887 is added to blk_1073741840_1016 (size=5275) 2024-11-17T15:32:53,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38189 is added to blk_1073741840_1016 (size=5275) 2024-11-17T15:32:53,982 INFO [M:0;7a780d55532c:42239 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bb55fc111a214a13ae51d406c126fc09 2024-11-17T15:32:54,000 DEBUG [M:0;7a780d55532c:42239 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3dba354d6d994f8794f68b5d8e9734b0 is 69, key is 7a780d55532c,40625,1731857572874/rs:state/1731857573108/Put/seqid=0 2024-11-17T15:32:54,004 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38189 is added to blk_1073741841_1017 (size=5156) 2024-11-17T15:32:54,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37887 is added to blk_1073741841_1017 (size=5156) 2024-11-17T15:32:54,005 INFO [M:0;7a780d55532c:42239 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3dba354d6d994f8794f68b5d8e9734b0 2024-11-17T15:32:54,022 DEBUG [M:0;7a780d55532c:42239 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7742bf60773b4272b612e361da672f5d is 52, key is load_balancer_on/state:d/1731857573695/Put/seqid=0 2024-11-17T15:32:54,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38189 is added to blk_1073741842_1018 (size=5056) 2024-11-17T15:32:54,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37887 is added to blk_1073741842_1018 (size=5056) 2024-11-17T15:32:54,027 INFO [M:0;7a780d55532c:42239 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7742bf60773b4272b612e361da672f5d 2024-11-17T15:32:54,031 DEBUG [M:0;7a780d55532c:42239 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5a4958bf952844e3ba625f6a906349b1 as hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5a4958bf952844e3ba625f6a906349b1 2024-11-17T15:32:54,032 INFO [RS:0;7a780d55532c:40625 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T15:32:54,032 INFO [RS:0;7a780d55532c:40625 {}] regionserver.HRegionServer(1031): Exiting; stopping=7a780d55532c,40625,1731857572874; zookeeper connection closed. 
2024-11-17T15:32:54,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40625-0x101268f4e630001, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T15:32:54,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40625-0x101268f4e630001, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T15:32:54,032 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@50f5155e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@50f5155e 2024-11-17T15:32:54,032 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-17T15:32:54,035 INFO [M:0;7a780d55532c:42239 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5a4958bf952844e3ba625f6a906349b1, entries=8, sequenceid=29, filesize=5.5 K 2024-11-17T15:32:54,036 DEBUG [M:0;7a780d55532c:42239 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bb55fc111a214a13ae51d406c126fc09 as hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/bb55fc111a214a13ae51d406c126fc09 2024-11-17T15:32:54,040 INFO [M:0;7a780d55532c:42239 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/bb55fc111a214a13ae51d406c126fc09, entries=3, sequenceid=29, filesize=5.2 K 2024-11-17T15:32:54,041 DEBUG [M:0;7a780d55532c:42239 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3dba354d6d994f8794f68b5d8e9734b0 as hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3dba354d6d994f8794f68b5d8e9734b0 2024-11-17T15:32:54,044 INFO [M:0;7a780d55532c:42239 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3dba354d6d994f8794f68b5d8e9734b0, entries=1, sequenceid=29, filesize=5.0 K 2024-11-17T15:32:54,045 DEBUG [M:0;7a780d55532c:42239 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7742bf60773b4272b612e361da672f5d as hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7742bf60773b4272b612e361da672f5d 2024-11-17T15:32:54,048 INFO [M:0;7a780d55532c:42239 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45087/user/jenkins/test-data/534d93ed-341d-d886-3774-fe053d729a18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7742bf60773b4272b612e361da672f5d, 
entries=1, sequenceid=29, filesize=4.9 K 2024-11-17T15:32:54,049 INFO [M:0;7a780d55532c:42239 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 108ms, sequenceid=29, compaction requested=false 2024-11-17T15:32:54,051 INFO [M:0;7a780d55532c:42239 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T15:32:54,051 DEBUG [M:0;7a780d55532c:42239 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731857573941Disabling compacts and flushes for region at 1731857573941Disabling writes for close at 1731857573941Obtaining lock to block concurrent updates at 1731857573941Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731857573941Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731857573942 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731857573942Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731857573942Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731857573956 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731857573956Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731857573965 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731857573978 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731857573978Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731857573986 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731857574000 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731857574000Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731857574008 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731857574022 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731857574022Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2c948b76: reopening flushed file at 1731857574030 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@356b6049: reopening flushed file at 1731857574035 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@35d37498: reopening flushed file at 1731857574040 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4b449e5: reopening flushed file at 1731857574044 (+4 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 108ms, sequenceid=29, compaction requested=false at 1731857574049 (+5 ms)Writing region close event to WAL at 1731857574051 (+2 ms)Closed at 1731857574051 2024-11-17T15:32:54,052 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:54,052 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:54,052 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:54,052 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:54,053 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T15:32:54,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38189 is 
added to blk_1073741830_1006 (size=10311) 2024-11-17T15:32:54,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37887 is added to blk_1073741830_1006 (size=10311) 2024-11-17T15:32:54,055 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-17T15:32:54,055 INFO [M:0;7a780d55532c:42239 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-17T15:32:54,055 INFO [M:0;7a780d55532c:42239 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42239 2024-11-17T15:32:54,056 INFO [M:0;7a780d55532c:42239 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T15:32:54,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42239-0x101268f4e630000, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T15:32:54,158 INFO [M:0;7a780d55532c:42239 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T15:32:54,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42239-0x101268f4e630000, quorum=127.0.0.1:58465, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T15:32:54,160 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5f055e5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T15:32:54,160 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2884ece6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T15:32:54,160 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T15:32:54,160 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@dbe3904{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T15:32:54,160 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a42ddf9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/hadoop.log.dir/,STOPPED} 2024-11-17T15:32:54,161 WARN [BP-1961293918-172.17.0.2-1731857572181 heartbeating to localhost/127.0.0.1:45087 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T15:32:54,161 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T15:32:54,162 WARN [BP-1961293918-172.17.0.2-1731857572181 heartbeating to localhost/127.0.0.1:45087 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1961293918-172.17.0.2-1731857572181 (Datanode Uuid 12ef5aaf-6c28-4793-8bed-b1d203941f8d) service to localhost/127.0.0.1:45087
2024-11-17T15:32:54,162 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-17T15:32:54,162 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/cluster_d5303d50-70c1-d4f7-916b-be5078377a84/data/data3/current/BP-1961293918-172.17.0.2-1731857572181 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-17T15:32:54,162 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/cluster_d5303d50-70c1-d4f7-916b-be5078377a84/data/data4/current/BP-1961293918-172.17.0.2-1731857572181 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-17T15:32:54,162 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-17T15:32:54,164 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@241be264{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-17T15:32:54,164 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1c0f9d08{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-17T15:32:54,164 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-17T15:32:54,165 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@356603ca{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-17T15:32:54,165 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4db8ccc1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/hadoop.log.dir/,STOPPED}
2024-11-17T15:32:54,166 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
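The datanode messages above (block pool service ending, IncrementalBlockReportManager and refreshUsed threads interrupted) are the normal byproduct of stopping the embedded HDFS cluster at the end of the test. As a rough sketch of where such messages come from, this is approximately what a test-side MiniDFSCluster start/teardown looks like; the paths and datanode count here are placeholders, and in this run the equivalent steps are driven through the HBase test utility rather than called directly like this.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Rough sketch only: starting and stopping an in-process HDFS cluster the way a test
// harness does. Configuration values below are illustrative assumptions.
public class MiniDfsTeardownSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Keep test data under a scratch directory (placeholder path).
        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "/tmp/minidfs-sketch");

        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(2)   // the log above shows two datanodes being shut down
            .build();
        try {
            cluster.waitActive();
            // ... a test body would use cluster.getFileSystem() here ...
        } finally {
            // shutdown() stops the datanodes, which interrupts their block-pool service,
            // command processor and refreshUsed threads -- the WARN lines seen above.
            cluster.shutdown();
        }
    }
}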
2024-11-17T15:32:54,166 WARN [BP-1961293918-172.17.0.2-1731857572181 heartbeating to localhost/127.0.0.1:45087 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-17T15:32:54,166 WARN [BP-1961293918-172.17.0.2-1731857572181 heartbeating to localhost/127.0.0.1:45087 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1961293918-172.17.0.2-1731857572181 (Datanode Uuid f75532c3-21ed-417b-be58-fae85f96817d) service to localhost/127.0.0.1:45087
2024-11-17T15:32:54,166 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-17T15:32:54,166 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/cluster_d5303d50-70c1-d4f7-916b-be5078377a84/data/data1/current/BP-1961293918-172.17.0.2-1731857572181 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-17T15:32:54,167 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/cluster_d5303d50-70c1-d4f7-916b-be5078377a84/data/data2/current/BP-1961293918-172.17.0.2-1731857572181 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-17T15:32:54,167 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-17T15:32:54,172 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@695f2019{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-17T15:32:54,172 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@29527032{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-17T15:32:54,172 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-17T15:32:54,172 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@224e134c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-17T15:32:54,172 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@33db594d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3de1488-2f15-504b-8872-dc5f5350715c/hadoop.log.dir/,STOPPED}
2024-11-17T15:32:54,178 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-17T15:32:54,191 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-17T15:32:54,200 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=268 (was 231)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
 app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-2
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
 app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
 app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
 app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-1
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
 app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
 app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
 app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-1
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
 app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
 app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
 app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-2
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
 app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45087
 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
 java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
 java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
 java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
 app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-1
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
 app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45087
 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
 java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
 java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
 java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
 app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:45087 from jenkins
 java.base@17.0.11/java.lang.Object.wait(Native Method)
 app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
 app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-45-1
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
 app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
 app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
 app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-3
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
 app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
 app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
 app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-1
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
 app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
 app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
 app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:45087 from jenkins
 java.base@17.0.11/java.lang.Object.wait(Native Method)
 app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
 app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-43-2
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
 app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
 app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
 app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:45087
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
 app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
 app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-2
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
 app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
 app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
 app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: globalEventExecutor-1-22
 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
 java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
 java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
 java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
 app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113)
 app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259)
 app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-3
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
 app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
 app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
 app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:45087 from jenkins.hfs.7
 java.base@17.0.11/java.lang.Object.wait(Native Method)
 app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
 app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
 app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45087
 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
 java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
 java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
 java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
 app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-2
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
 app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
 app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
 app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
 app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-3
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
 app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
 app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
 app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-3
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
 app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-3
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
 app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
 app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
 app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:45087
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
 app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
 app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
- Thread LEAK? -, OpenFileDescriptor=534 (was 509) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=24 (was 24), ProcessCount=11 (was 11), AvailableMemoryMB=3564 (was 3572)
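The ResourceChecker block above is a before/after comparison: the harness counted 231 live threads before the test and 268 after, then dumped the stacks of the threads it regards as potentially hanging, alongside file-descriptor, load and memory counters. A generic sketch of that technique, using only standard JDK calls (this is not HBase's ResourceChecker implementation, just an illustration of the before/after diff), might look like:

import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Illustrative sketch of a before/after thread-leak check similar in spirit to the
// ResourceChecker output above: snapshot thread names before a test, snapshot again
// after it, and dump stacks for any thread that appeared in between.
public class ThreadLeakCheckSketch {

    static Set<String> snapshotThreadNames() {
        Set<String> names = new HashSet<>();
        for (Thread t : Thread.getAllStackTraces().keySet()) {
            names.add(t.getName());
        }
        return names;
    }

    static void reportNewThreads(Set<String> before) {
        Map<Thread, StackTraceElement[]> now = Thread.getAllStackTraces();
        System.out.println("Thread=" + now.size() + " (was " + before.size() + ")");
        for (Map.Entry<Thread, StackTraceElement[]> e : now.entrySet()) {
            if (before.contains(e.getKey().getName())) {
                continue; // existed before the test; not a leak candidate
            }
            System.out.println("Potentially hanging thread: " + e.getKey().getName());
            for (StackTraceElement frame : e.getValue()) {
                System.out.println("    " + frame);
            }
        }
    }

    public static void main(String[] args) {
        Set<String> before = snapshotThreadNames();
        // The test body would run here; this placeholder stands in for whatever
        // worker threads the test leaves behind (hypothetical thread name).
        Thread leaked = new Thread(() -> {
            try { Thread.sleep(60_000); } catch (InterruptedException ignored) { }
        }, "example-leaked-thread");
        leaked.setDaemon(true);
        leaked.start();
        reportNewThreads(before);
    }
}

In this run the 37 extra threads are mostly idle Netty event-loop and Hadoop IPC/LeaseRenewer threads that shut down lazily after the minicluster stops, which is why the checker flags them only as "Thread LEAK?" rather than failing the test.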