2024-11-19 04:53:12,665 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-19 04:53:12,681 main DEBUG Took 0.013476 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-19 04:53:12,682 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-19 04:53:12,682 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-19 04:53:12,683 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-19 04:53:12,685 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 04:53:12,693 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-19 04:53:12,706 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 04:53:12,707 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 04:53:12,708 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 04:53:12,709 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 04:53:12,709 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 04:53:12,710 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 04:53:12,711 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 04:53:12,711 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 04:53:12,712 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 04:53:12,713 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 04:53:12,714 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 04:53:12,714 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 04:53:12,715 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 04:53:12,715 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-19 04:53:12,716 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 04:53:12,716 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 04:53:12,717 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 04:53:12,717 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 04:53:12,718 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 04:53:12,718 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 04:53:12,719 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 04:53:12,719 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 04:53:12,720 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 04:53:12,721 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 04:53:12,721 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 04:53:12,722 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-19 04:53:12,724 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 04:53:12,725 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-19 04:53:12,728 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-19 04:53:12,728 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-19 04:53:12,730 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-19 04:53:12,730 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-19 04:53:12,742 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-19 04:53:12,745 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-19 04:53:12,748 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-19 04:53:12,748 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-19 04:53:12,749 main DEBUG createAppenders(={Console}) 2024-11-19 04:53:12,749 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-19 04:53:12,750 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-19 04:53:12,750 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-19 04:53:12,751 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-19 04:53:12,751 main DEBUG OutputStream closed 2024-11-19 04:53:12,752 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-19 04:53:12,752 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-19 04:53:12,752 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-19 04:53:12,820 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-19 04:53:12,822 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-19 04:53:12,823 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-19 04:53:12,823 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-19 04:53:12,824 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-19 04:53:12,824 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-19 04:53:12,825 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-19 04:53:12,825 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-19 04:53:12,825 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-19 04:53:12,825 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-19 04:53:12,826 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-19 04:53:12,826 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-19 04:53:12,826 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-19 04:53:12,826 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-19 04:53:12,827 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-19 04:53:12,827 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-19 04:53:12,827 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-19 04:53:12,828 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-19 04:53:12,830 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-19 04:53:12,830 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-19 04:53:12,830 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-19 04:53:12,831 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-19T04:53:13,120 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7 2024-11-19 04:53:13,123 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-19 04:53:13,123 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
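Note: the PatternLayout built above ("%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", appender "Console" on SYSTEM_ERR, root level INFO) is the format of every timestamped entry that follows: ISO8601 timestamp, padded level, [thread plus MDC], the last two components of the calling class with the line number, then the message. A minimal, hypothetical sketch of a logging call and roughly the line it would produce under that pattern (the class, package, and line number below are made up, not from this run):

    package org.example.logging;

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class PatternDemo {
      private static final Logger LOG = LoggerFactory.getLogger(PatternDemo.class);

      public static void main(String[] args) {
        // Under the pattern above this call would render roughly as:
        //   2024-11-19T04:53:13,000 INFO  [main {}] logging.PatternDemo(12): starting demo
        // i.e. %d{ISO8601}, %-5p, [%t and MDC], %C{2}(%L), then %m.
        LOG.info("starting demo");
      }
    }
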
2024-11-19T04:53:13,133 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-19T04:53:13,165 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=224, ProcessCount=11, AvailableMemoryMB=12591 2024-11-19T04:53:13,168 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T04:53:13,184 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/cluster_d123a51d-24cf-f431-42e8-c73d729b17eb, deleteOnExit=true 2024-11-19T04:53:13,184 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T04:53:13,185 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/test.cache.data in system properties and HBase conf 2024-11-19T04:53:13,186 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T04:53:13,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/hadoop.log.dir in system properties and HBase conf 2024-11-19T04:53:13,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T04:53:13,188 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T04:53:13,188 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T04:53:13,285 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-19T04:53:13,377 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-19T04:53:13,381 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T04:53:13,381 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T04:53:13,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T04:53:13,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T04:53:13,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T04:53:13,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T04:53:13,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T04:53:13,384 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T04:53:13,384 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T04:53:13,384 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/nfs.dump.dir in system properties and HBase conf 2024-11-19T04:53:13,385 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/java.io.tmpdir in system properties and HBase conf 2024-11-19T04:53:13,385 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T04:53:13,386 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T04:53:13,386 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T04:53:13,847 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T04:53:14,176 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-19T04:53:14,258 INFO [Time-limited test {}] log.Log(170): Logging initialized @2301ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-19T04:53:14,335 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:53:14,400 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T04:53:14,419 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T04:53:14,420 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T04:53:14,421 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T04:53:14,434 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:53:14,436 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/hadoop.log.dir/,AVAILABLE} 2024-11-19T04:53:14,437 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T04:53:14,625 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/java.io.tmpdir/jetty-localhost-36331-hadoop-hdfs-3_4_1-tests_jar-_-any-18245738614350942339/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T04:53:14,635 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:36331} 2024-11-19T04:53:14,636 INFO [Time-limited test {}] server.Server(415): Started @2680ms 2024-11-19T04:53:14,670 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T04:53:15,017 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:53:15,023 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T04:53:15,024 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T04:53:15,024 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T04:53:15,024 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T04:53:15,025 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/hadoop.log.dir/,AVAILABLE} 2024-11-19T04:53:15,026 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T04:53:15,145 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/java.io.tmpdir/jetty-localhost-41573-hadoop-hdfs-3_4_1-tests_jar-_-any-14714258900889367640/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:53:15,146 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:41573} 2024-11-19T04:53:15,146 INFO [Time-limited test {}] server.Server(415): Started @3191ms 2024-11-19T04:53:15,202 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T04:53:15,321 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:53:15,326 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T04:53:15,335 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T04:53:15,335 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T04:53:15,336 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T04:53:15,337 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/hadoop.log.dir/,AVAILABLE} 2024-11-19T04:53:15,337 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T04:53:15,472 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/java.io.tmpdir/jetty-localhost-42081-hadoop-hdfs-3_4_1-tests_jar-_-any-8765671171853387012/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:53:15,473 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:42081} 2024-11-19T04:53:15,473 INFO [Time-limited test {}] server.Server(415): Started @3518ms 2024-11-19T04:53:15,475 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
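Note: the StartMiniClusterOption logged at the start of the test (numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1) is what produced the two DataNode web servers above. A minimal, hypothetical sketch of how a test would request that cluster shape with HBaseTestingUtil (illustrative only; the actual TestLogRolling setup code is not part of this log):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)        // one HMaster, as in this run
            .numRegionServers(1)  // one RegionServer
            .numDataNodes(2)      // two HDFS DataNodes (the two jetty "datanode" contexts above)
            .numZkServers(1)      // one MiniZooKeeperCluster server
            .build();
        util.startMiniCluster(option);   // starts DFS, ZooKeeper, then HBase
        try {
          // ... test logic against util.getConnection() ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }
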
2024-11-19T04:53:15,646 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/cluster_d123a51d-24cf-f431-42e8-c73d729b17eb/data/data2/current/BP-969444313-172.17.0.2-1731991993942/current, will proceed with Du for space computation calculation, 2024-11-19T04:53:15,646 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/cluster_d123a51d-24cf-f431-42e8-c73d729b17eb/data/data1/current/BP-969444313-172.17.0.2-1731991993942/current, will proceed with Du for space computation calculation, 2024-11-19T04:53:15,646 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/cluster_d123a51d-24cf-f431-42e8-c73d729b17eb/data/data3/current/BP-969444313-172.17.0.2-1731991993942/current, will proceed with Du for space computation calculation, 2024-11-19T04:53:15,646 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/cluster_d123a51d-24cf-f431-42e8-c73d729b17eb/data/data4/current/BP-969444313-172.17.0.2-1731991993942/current, will proceed with Du for space computation calculation, 2024-11-19T04:53:15,704 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T04:53:15,705 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T04:53:15,775 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf608202d608fa255 with lease ID 0xb0de08237e862041: Processing first storage report for DS-cae856b5-a944-46bf-8d2a-259fe71de563 from datanode DatanodeRegistration(127.0.0.1:38529, datanodeUuid=df9449b8-380d-4ec3-87f3-b6292c8e0f07, infoPort=44923, infoSecurePort=0, ipcPort=39405, storageInfo=lv=-57;cid=testClusterID;nsid=1156140969;c=1731991993942) 2024-11-19T04:53:15,777 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf608202d608fa255 with lease ID 0xb0de08237e862041: from storage DS-cae856b5-a944-46bf-8d2a-259fe71de563 node DatanodeRegistration(127.0.0.1:38529, datanodeUuid=df9449b8-380d-4ec3-87f3-b6292c8e0f07, infoPort=44923, infoSecurePort=0, ipcPort=39405, storageInfo=lv=-57;cid=testClusterID;nsid=1156140969;c=1731991993942), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-19T04:53:15,777 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4e71eda564b95920 with lease ID 0xb0de08237e862042: Processing first storage report for DS-a6a75b9d-a9bc-4ec1-9c88-33ab7d803ded from datanode DatanodeRegistration(127.0.0.1:40499, datanodeUuid=8010304b-bd85-463f-8ab5-6eababe95d1a, infoPort=44957, infoSecurePort=0, ipcPort=33751, storageInfo=lv=-57;cid=testClusterID;nsid=1156140969;c=1731991993942) 2024-11-19T04:53:15,778 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4e71eda564b95920 with lease ID 0xb0de08237e862042: from storage DS-a6a75b9d-a9bc-4ec1-9c88-33ab7d803ded node DatanodeRegistration(127.0.0.1:40499, datanodeUuid=8010304b-bd85-463f-8ab5-6eababe95d1a, infoPort=44957, infoSecurePort=0, ipcPort=33751, storageInfo=lv=-57;cid=testClusterID;nsid=1156140969;c=1731991993942), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-19T04:53:15,778 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf608202d608fa255 with lease ID 0xb0de08237e862041: Processing first storage report for DS-8d81cca1-3c25-4fff-a9fb-f0940fcfbbe1 from datanode DatanodeRegistration(127.0.0.1:38529, datanodeUuid=df9449b8-380d-4ec3-87f3-b6292c8e0f07, infoPort=44923, infoSecurePort=0, ipcPort=39405, storageInfo=lv=-57;cid=testClusterID;nsid=1156140969;c=1731991993942) 2024-11-19T04:53:15,778 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf608202d608fa255 with lease ID 0xb0de08237e862041: from storage DS-8d81cca1-3c25-4fff-a9fb-f0940fcfbbe1 node DatanodeRegistration(127.0.0.1:38529, datanodeUuid=df9449b8-380d-4ec3-87f3-b6292c8e0f07, infoPort=44923, infoSecurePort=0, ipcPort=39405, storageInfo=lv=-57;cid=testClusterID;nsid=1156140969;c=1731991993942), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:53:15,779 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4e71eda564b95920 with lease ID 0xb0de08237e862042: Processing first storage report for DS-4c661864-2266-4252-8838-06672133608b from datanode DatanodeRegistration(127.0.0.1:40499, datanodeUuid=8010304b-bd85-463f-8ab5-6eababe95d1a, infoPort=44957, infoSecurePort=0, ipcPort=33751, storageInfo=lv=-57;cid=testClusterID;nsid=1156140969;c=1731991993942) 2024-11-19T04:53:15,779 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x4e71eda564b95920 with lease ID 0xb0de08237e862042: from storage DS-4c661864-2266-4252-8838-06672133608b node DatanodeRegistration(127.0.0.1:40499, datanodeUuid=8010304b-bd85-463f-8ab5-6eababe95d1a, infoPort=44957, infoSecurePort=0, ipcPort=33751, storageInfo=lv=-57;cid=testClusterID;nsid=1156140969;c=1731991993942), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-19T04:53:15,843 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7 2024-11-19T04:53:15,916 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/cluster_d123a51d-24cf-f431-42e8-c73d729b17eb/zookeeper_0, clientPort=59995, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/cluster_d123a51d-24cf-f431-42e8-c73d729b17eb/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/cluster_d123a51d-24cf-f431-42e8-c73d729b17eb/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T04:53:15,926 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59995 2024-11-19T04:53:15,936 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:53:15,938 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:53:16,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741825_1001 (size=7) 2024-11-19T04:53:16,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741825_1001 (size=7) 2024-11-19T04:53:16,574 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7 with version=8 2024-11-19T04:53:16,574 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/hbase-staging 2024-11-19T04:53:16,680 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-19T04:53:16,932 INFO [Time-limited test {}] client.ConnectionUtils(128): master/08a7f35e60d4:0 server-side Connection retries=45 2024-11-19T04:53:16,944 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T04:53:16,944 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T04:53:16,949 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T04:53:16,949 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T04:53:16,949 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T04:53:17,089 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T04:53:17,150 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-19T04:53:17,158 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-19T04:53:17,162 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T04:53:17,188 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 7516 (auto-detected) 2024-11-19T04:53:17,189 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-19T04:53:17,208 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37341 2024-11-19T04:53:17,229 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37341 connecting to ZooKeeper ensemble=127.0.0.1:59995 2024-11-19T04:53:17,261 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:373410x0, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T04:53:17,264 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37341-0x1012e92708a0000 connected 2024-11-19T04:53:17,299 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:53:17,302 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:53:17,315 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37341-0x1012e92708a0000, quorum=127.0.0.1:59995, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T04:53:17,319 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7, hbase.cluster.distributed=false 2024-11-19T04:53:17,345 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37341-0x1012e92708a0000, quorum=127.0.0.1:59995, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T04:53:17,349 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37341 
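Note: at this point the master's RPC server is bound to /172.17.0.2:37341 and has connected to the ZooKeeper ensemble at 127.0.0.1:59995 (the client port chosen by MiniZooKeeperCluster earlier). A hypothetical sketch of how a client would point a Configuration at that quorum and connect; tests normally use the testing utility's own connection, so this is illustrative only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class MiniClusterClientSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");            // quorum host from the log
        conf.setInt("hbase.zookeeper.property.clientPort", 59995);  // client port from the log
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
        }
      }
    }
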
2024-11-19T04:53:17,349 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37341 2024-11-19T04:53:17,350 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37341 2024-11-19T04:53:17,350 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37341 2024-11-19T04:53:17,351 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37341 2024-11-19T04:53:17,463 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/08a7f35e60d4:0 server-side Connection retries=45 2024-11-19T04:53:17,465 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T04:53:17,465 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T04:53:17,465 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T04:53:17,466 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T04:53:17,466 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T04:53:17,469 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T04:53:17,471 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T04:53:17,472 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36677 2024-11-19T04:53:17,473 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36677 connecting to ZooKeeper ensemble=127.0.0.1:59995 2024-11-19T04:53:17,475 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:53:17,479 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:53:17,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:366770x0, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T04:53:17,486 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:366770x0, quorum=127.0.0.1:59995, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T04:53:17,486 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): 
regionserver:36677-0x1012e92708a0001 connected 2024-11-19T04:53:17,490 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T04:53:17,498 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T04:53:17,500 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36677-0x1012e92708a0001, quorum=127.0.0.1:59995, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T04:53:17,505 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36677-0x1012e92708a0001, quorum=127.0.0.1:59995, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T04:53:17,506 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36677 2024-11-19T04:53:17,506 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36677 2024-11-19T04:53:17,507 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36677 2024-11-19T04:53:17,508 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36677 2024-11-19T04:53:17,509 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36677 2024-11-19T04:53:17,525 DEBUG [M:0;08a7f35e60d4:37341 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;08a7f35e60d4:37341 2024-11-19T04:53:17,526 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/08a7f35e60d4,37341,1731991996736 2024-11-19T04:53:17,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37341-0x1012e92708a0000, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T04:53:17,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36677-0x1012e92708a0001, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T04:53:17,535 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37341-0x1012e92708a0000, quorum=127.0.0.1:59995, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/08a7f35e60d4,37341,1731991996736 2024-11-19T04:53:17,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36677-0x1012e92708a0001, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T04:53:17,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37341-0x1012e92708a0000, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:53:17,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36677-0x1012e92708a0001, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:53:17,564 DEBUG 
[master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37341-0x1012e92708a0000, quorum=127.0.0.1:59995, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T04:53:17,566 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/08a7f35e60d4,37341,1731991996736 from backup master directory 2024-11-19T04:53:17,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37341-0x1012e92708a0000, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/08a7f35e60d4,37341,1731991996736 2024-11-19T04:53:17,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36677-0x1012e92708a0001, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T04:53:17,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37341-0x1012e92708a0000, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T04:53:17,570 WARN [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T04:53:17,570 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=08a7f35e60d4,37341,1731991996736 2024-11-19T04:53:17,572 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-19T04:53:17,573 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-19T04:53:17,632 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/hbase.id] with ID: c1bcc81e-7fa7-42a1-aaa0-596013051a95 2024-11-19T04:53:17,632 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/.tmp/hbase.id 2024-11-19T04:53:17,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741826_1002 (size=42) 2024-11-19T04:53:17,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741826_1002 (size=42) 2024-11-19T04:53:17,645 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/.tmp/hbase.id]:[hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/hbase.id] 2024-11-19T04:53:17,690 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:53:17,694 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching 
table descriptors from the filesystem. 2024-11-19T04:53:17,713 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 17ms. 2024-11-19T04:53:17,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37341-0x1012e92708a0000, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:53:17,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36677-0x1012e92708a0001, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:53:17,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741827_1003 (size=196) 2024-11-19T04:53:17,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741827_1003 (size=196) 2024-11-19T04:53:18,152 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T04:53:18,154 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T04:53:18,160 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T04:53:18,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741828_1004 (size=1189) 2024-11-19T04:53:18,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741828_1004 (size=1189) 2024-11-19T04:53:18,208 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/MasterData/data/master/store 2024-11-19T04:53:18,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741829_1005 (size=34) 2024-11-19T04:53:18,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741829_1005 (size=34) 2024-11-19T04:53:18,233 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-19T04:53:18,237 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:53:18,239 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T04:53:18,239 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:53:18,239 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:53:18,241 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T04:53:18,241 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:53:18,241 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T04:53:18,243 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731991998238Disabling compacts and flushes for region at 1731991998238Disabling writes for close at 1731991998241 (+3 ms)Writing region close event to WAL at 1731991998241Closed at 1731991998241 2024-11-19T04:53:18,245 WARN [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/MasterData/data/master/store/.initializing 2024-11-19T04:53:18,246 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/MasterData/WALs/08a7f35e60d4,37341,1731991996736 2024-11-19T04:53:18,270 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=08a7f35e60d4%2C37341%2C1731991996736, suffix=, logDir=hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/MasterData/WALs/08a7f35e60d4,37341,1731991996736, archiveDir=hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/MasterData/oldWALs, maxLogs=10 2024-11-19T04:53:18,282 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C37341%2C1731991996736.1731991998277 2024-11-19T04:53:18,302 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/MasterData/WALs/08a7f35e60d4,37341,1731991996736/08a7f35e60d4%2C37341%2C1731991996736.1731991998277 2024-11-19T04:53:18,310 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44957:44957),(127.0.0.1/127.0.0.1:44923:44923)] 2024-11-19T04:53:18,311 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T04:53:18,311 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:53:18,314 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:53:18,315 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:53:18,354 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:53:18,381 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T04:53:18,384 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:53:18,387 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:53:18,387 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:53:18,390 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T04:53:18,390 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:53:18,391 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T04:53:18,391 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:53:18,393 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T04:53:18,393 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:53:18,394 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T04:53:18,395 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:53:18,397 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T04:53:18,397 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:53:18,398 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T04:53:18,398 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:53:18,401 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:53:18,402 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:53:18,407 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:53:18,407 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:53:18,411 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T04:53:18,414 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:53:18,419 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T04:53:18,420 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=695815, jitterRate=-0.11522635817527771}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T04:53:18,425 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731991998328Initializing all the Stores at 1731991998330 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731991998331 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731991998332 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731991998332Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731991998332Cleaning up temporary data from old regions at 1731991998407 (+75 ms)Region opened successfully at 1731991998425 (+18 ms) 2024-11-19T04:53:18,426 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T04:53:18,466 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ff88e7e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=08a7f35e60d4/172.17.0.2:0 2024-11-19T04:53:18,498 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T04:53:18,509 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T04:53:18,509 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T04:53:18,513 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T04:53:18,514 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-19T04:53:18,519 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-19T04:53:18,519 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T04:53:18,544 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T04:53:18,553 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37341-0x1012e92708a0000, quorum=127.0.0.1:59995, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T04:53:18,555 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T04:53:18,558 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T04:53:18,559 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37341-0x1012e92708a0000, quorum=127.0.0.1:59995, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T04:53:18,562 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T04:53:18,564 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T04:53:18,567 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37341-0x1012e92708a0000, quorum=127.0.0.1:59995, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T04:53:18,569 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T04:53:18,570 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37341-0x1012e92708a0000, quorum=127.0.0.1:59995, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T04:53:18,572 
DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T04:53:18,588 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37341-0x1012e92708a0000, quorum=127.0.0.1:59995, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T04:53:18,590 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T04:53:18,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37341-0x1012e92708a0000, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T04:53:18,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36677-0x1012e92708a0001, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T04:53:18,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36677-0x1012e92708a0001, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:53:18,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37341-0x1012e92708a0000, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:53:18,598 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=08a7f35e60d4,37341,1731991996736, sessionid=0x1012e92708a0000, setting cluster-up flag (Was=false) 2024-11-19T04:53:18,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36677-0x1012e92708a0001, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:53:18,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37341-0x1012e92708a0000, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:53:18,617 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T04:53:18,619 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=08a7f35e60d4,37341,1731991996736 2024-11-19T04:53:18,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37341-0x1012e92708a0000, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:53:18,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36677-0x1012e92708a0001, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:53:18,632 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T04:53:18,633 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=08a7f35e60d4,37341,1731991996736 2024-11-19T04:53:18,640 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T04:53:18,713 INFO [RS:0;08a7f35e60d4:36677 {}] regionserver.HRegionServer(746): ClusterId : c1bcc81e-7fa7-42a1-aaa0-596013051a95 2024-11-19T04:53:18,715 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T04:53:18,716 DEBUG [RS:0;08a7f35e60d4:36677 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T04:53:18,722 DEBUG [RS:0;08a7f35e60d4:36677 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T04:53:18,722 DEBUG [RS:0;08a7f35e60d4:36677 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T04:53:18,726 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T04:53:18,726 DEBUG [RS:0;08a7f35e60d4:36677 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T04:53:18,727 DEBUG [RS:0;08a7f35e60d4:36677 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2db56641, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=08a7f35e60d4/172.17.0.2:0 2024-11-19T04:53:18,732 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-19T04:53:18,738 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 08a7f35e60d4,37341,1731991996736 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T04:53:18,744 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/08a7f35e60d4:0, corePoolSize=5, maxPoolSize=5 2024-11-19T04:53:18,745 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/08a7f35e60d4:0, corePoolSize=5, maxPoolSize=5 2024-11-19T04:53:18,745 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=5, maxPoolSize=5 2024-11-19T04:53:18,745 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=5, maxPoolSize=5 2024-11-19T04:53:18,745 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/08a7f35e60d4:0, corePoolSize=10, maxPoolSize=10 2024-11-19T04:53:18,745 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:53:18,745 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=2, maxPoolSize=2 2024-11-19T04:53:18,746 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:53:18,747 DEBUG [RS:0;08a7f35e60d4:36677 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;08a7f35e60d4:36677 2024-11-19T04:53:18,749 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731992028749 2024-11-19T04:53:18,751 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T04:53:18,751 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T04:53:18,751 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T04:53:18,752 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T04:53:18,752 INFO [RS:0;08a7f35e60d4:36677 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T04:53:18,752 INFO [RS:0;08a7f35e60d4:36677 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T04:53:18,752 DEBUG [RS:0;08a7f35e60d4:36677 {}] 
regionserver.HRegionServer(832): About to register with Master. 2024-11-19T04:53:18,755 INFO [RS:0;08a7f35e60d4:36677 {}] regionserver.HRegionServer(2659): reportForDuty to master=08a7f35e60d4,37341,1731991996736 with port=36677, startcode=1731991997423 2024-11-19T04:53:18,756 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T04:53:18,756 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T04:53:18,757 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T04:53:18,757 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T04:53:18,757 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:53:18,757 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T04:53:18,758 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-19T04:53:18,761 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T04:53:18,763 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T04:53:18,763 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T04:53:18,768 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T04:53:18,768 DEBUG [RS:0;08a7f35e60d4:36677 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T04:53:18,768 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T04:53:18,770 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.large.0-1731991998770,5,FailOnTimeoutGroup] 2024-11-19T04:53:18,771 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.small.0-1731991998770,5,FailOnTimeoutGroup] 2024-11-19T04:53:18,771 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T04:53:18,771 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T04:53:18,772 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T04:53:18,772 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-19T04:53:18,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741831_1007 (size=1321) 2024-11-19T04:53:18,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741831_1007 (size=1321) 2024-11-19T04:53:18,777 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T04:53:18,777 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7 2024-11-19T04:53:18,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741832_1008 (size=32) 2024-11-19T04:53:18,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741832_1008 (size=32) 2024-11-19T04:53:18,790 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:53:18,792 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T04:53:18,795 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T04:53:18,795 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:53:18,796 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:53:18,796 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T04:53:18,798 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T04:53:18,798 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:53:18,799 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:53:18,799 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T04:53:18,801 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T04:53:18,802 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:53:18,802 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:53:18,803 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T04:53:18,805 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T04:53:18,805 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:53:18,806 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:53:18,806 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T04:53:18,807 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/hbase/meta/1588230740 2024-11-19T04:53:18,808 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/hbase/meta/1588230740 2024-11-19T04:53:18,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T04:53:18,812 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T04:53:18,813 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-19T04:53:18,815 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T04:53:18,820 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T04:53:18,821 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=762110, jitterRate=-0.030927076935768127}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T04:53:18,825 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731991998790Initializing all the Stores at 1731991998792 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731991998792Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731991998792Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731991998792Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731991998792Cleaning up temporary data from old regions at 1731991998812 (+20 ms)Region opened successfully at 1731991998825 (+13 ms) 2024-11-19T04:53:18,826 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T04:53:18,826 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T04:53:18,826 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T04:53:18,826 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T04:53:18,826 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T04:53:18,827 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T04:53:18,828 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731991998825Disabling compacts and flushes for region at 1731991998825Disabling writes for close at 1731991998826 (+1 ms)Writing 
region close event to WAL at 1731991998827 (+1 ms)Closed at 1731991998827 2024-11-19T04:53:18,832 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T04:53:18,832 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T04:53:18,840 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T04:53:18,841 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48039, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T04:53:18,848 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37341 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 08a7f35e60d4,36677,1731991997423 2024-11-19T04:53:18,850 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T04:53:18,851 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37341 {}] master.ServerManager(517): Registering regionserver=08a7f35e60d4,36677,1731991997423 2024-11-19T04:53:18,852 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T04:53:18,865 DEBUG [RS:0;08a7f35e60d4:36677 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7 2024-11-19T04:53:18,865 DEBUG [RS:0;08a7f35e60d4:36677 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35757 2024-11-19T04:53:18,865 DEBUG [RS:0;08a7f35e60d4:36677 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T04:53:18,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37341-0x1012e92708a0000, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T04:53:18,871 DEBUG [RS:0;08a7f35e60d4:36677 {}] zookeeper.ZKUtil(111): regionserver:36677-0x1012e92708a0001, quorum=127.0.0.1:59995, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/08a7f35e60d4,36677,1731991997423 2024-11-19T04:53:18,871 WARN [RS:0;08a7f35e60d4:36677 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-19T04:53:18,871 INFO [RS:0;08a7f35e60d4:36677 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T04:53:18,871 DEBUG [RS:0;08a7f35e60d4:36677 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423 2024-11-19T04:53:18,873 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [08a7f35e60d4,36677,1731991997423] 2024-11-19T04:53:18,896 INFO [RS:0;08a7f35e60d4:36677 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T04:53:18,907 INFO [RS:0;08a7f35e60d4:36677 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T04:53:18,913 INFO [RS:0;08a7f35e60d4:36677 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T04:53:18,913 INFO [RS:0;08a7f35e60d4:36677 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T04:53:18,914 INFO [RS:0;08a7f35e60d4:36677 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T04:53:18,920 INFO [RS:0;08a7f35e60d4:36677 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T04:53:18,921 INFO [RS:0;08a7f35e60d4:36677 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T04:53:18,921 DEBUG [RS:0;08a7f35e60d4:36677 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:53:18,922 DEBUG [RS:0;08a7f35e60d4:36677 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:53:18,922 DEBUG [RS:0;08a7f35e60d4:36677 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:53:18,922 DEBUG [RS:0;08a7f35e60d4:36677 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:53:18,922 DEBUG [RS:0;08a7f35e60d4:36677 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:53:18,922 DEBUG [RS:0;08a7f35e60d4:36677 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/08a7f35e60d4:0, corePoolSize=2, maxPoolSize=2 2024-11-19T04:53:18,922 DEBUG [RS:0;08a7f35e60d4:36677 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:53:18,923 DEBUG [RS:0;08a7f35e60d4:36677 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:53:18,923 DEBUG [RS:0;08a7f35e60d4:36677 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/08a7f35e60d4:0, corePoolSize=1, 
maxPoolSize=1 2024-11-19T04:53:18,923 DEBUG [RS:0;08a7f35e60d4:36677 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:53:18,923 DEBUG [RS:0;08a7f35e60d4:36677 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:53:18,923 DEBUG [RS:0;08a7f35e60d4:36677 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:53:18,923 DEBUG [RS:0;08a7f35e60d4:36677 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/08a7f35e60d4:0, corePoolSize=3, maxPoolSize=3 2024-11-19T04:53:18,923 DEBUG [RS:0;08a7f35e60d4:36677 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0, corePoolSize=3, maxPoolSize=3 2024-11-19T04:53:18,924 INFO [RS:0;08a7f35e60d4:36677 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T04:53:18,924 INFO [RS:0;08a7f35e60d4:36677 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T04:53:18,925 INFO [RS:0;08a7f35e60d4:36677 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T04:53:18,925 INFO [RS:0;08a7f35e60d4:36677 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T04:53:18,925 INFO [RS:0;08a7f35e60d4:36677 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T04:53:18,925 INFO [RS:0;08a7f35e60d4:36677 {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,36677,1731991997423-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T04:53:18,945 INFO [RS:0;08a7f35e60d4:36677 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T04:53:18,946 INFO [RS:0;08a7f35e60d4:36677 {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,36677,1731991997423-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T04:53:18,947 INFO [RS:0;08a7f35e60d4:36677 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:53:18,947 INFO [RS:0;08a7f35e60d4:36677 {}] regionserver.Replication(171): 08a7f35e60d4,36677,1731991997423 started 2024-11-19T04:53:18,964 INFO [RS:0;08a7f35e60d4:36677 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T04:53:18,965 INFO [RS:0;08a7f35e60d4:36677 {}] regionserver.HRegionServer(1482): Serving as 08a7f35e60d4,36677,1731991997423, RpcServer on 08a7f35e60d4/172.17.0.2:36677, sessionid=0x1012e92708a0001 2024-11-19T04:53:18,966 DEBUG [RS:0;08a7f35e60d4:36677 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T04:53:18,966 DEBUG [RS:0;08a7f35e60d4:36677 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 08a7f35e60d4,36677,1731991997423 2024-11-19T04:53:18,966 DEBUG [RS:0;08a7f35e60d4:36677 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '08a7f35e60d4,36677,1731991997423' 2024-11-19T04:53:18,966 DEBUG [RS:0;08a7f35e60d4:36677 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T04:53:18,967 DEBUG [RS:0;08a7f35e60d4:36677 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T04:53:18,968 DEBUG [RS:0;08a7f35e60d4:36677 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T04:53:18,968 DEBUG [RS:0;08a7f35e60d4:36677 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T04:53:18,968 DEBUG [RS:0;08a7f35e60d4:36677 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 08a7f35e60d4,36677,1731991997423 2024-11-19T04:53:18,968 DEBUG [RS:0;08a7f35e60d4:36677 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '08a7f35e60d4,36677,1731991997423' 2024-11-19T04:53:18,968 DEBUG [RS:0;08a7f35e60d4:36677 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T04:53:18,969 DEBUG [RS:0;08a7f35e60d4:36677 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T04:53:18,969 DEBUG [RS:0;08a7f35e60d4:36677 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T04:53:18,970 INFO [RS:0;08a7f35e60d4:36677 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T04:53:18,970 INFO [RS:0;08a7f35e60d4:36677 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T04:53:19,003 WARN [08a7f35e60d4:37341 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-19T04:53:19,078 INFO [RS:0;08a7f35e60d4:36677 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=08a7f35e60d4%2C36677%2C1731991997423, suffix=, logDir=hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423, archiveDir=hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/oldWALs, maxLogs=32 2024-11-19T04:53:19,080 INFO [RS:0;08a7f35e60d4:36677 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C36677%2C1731991997423.1731991999080 2024-11-19T04:53:19,089 INFO [RS:0;08a7f35e60d4:36677 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423/08a7f35e60d4%2C36677%2C1731991997423.1731991999080 2024-11-19T04:53:19,090 DEBUG [RS:0;08a7f35e60d4:36677 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44923:44923),(127.0.0.1/127.0.0.1:44957:44957)] 2024-11-19T04:53:19,255 DEBUG [08a7f35e60d4:37341 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T04:53:19,267 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=08a7f35e60d4,36677,1731991997423 2024-11-19T04:53:19,273 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 08a7f35e60d4,36677,1731991997423, state=OPENING 2024-11-19T04:53:19,278 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T04:53:19,280 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37341-0x1012e92708a0000, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:53:19,280 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36677-0x1012e92708a0001, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:53:19,281 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T04:53:19,281 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T04:53:19,282 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T04:53:19,284 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=08a7f35e60d4,36677,1731991997423}] 2024-11-19T04:53:19,458 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T04:53:19,462 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43229, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T04:53:19,474 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T04:53:19,475 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T04:53:19,478 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=08a7f35e60d4%2C36677%2C1731991997423.meta, suffix=.meta, logDir=hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423, archiveDir=hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/oldWALs, maxLogs=32 2024-11-19T04:53:19,480 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C36677%2C1731991997423.meta.1731991999479.meta 2024-11-19T04:53:19,487 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423/08a7f35e60d4%2C36677%2C1731991997423.meta.1731991999479.meta 2024-11-19T04:53:19,488 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44923:44923),(127.0.0.1/127.0.0.1:44957:44957)] 2024-11-19T04:53:19,489 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T04:53:19,490 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T04:53:19,493 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T04:53:19,498 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-19T04:53:19,502 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T04:53:19,503 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:53:19,503 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T04:53:19,503 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T04:53:19,506 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T04:53:19,507 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T04:53:19,507 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:53:19,508 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:53:19,508 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T04:53:19,510 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T04:53:19,510 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:53:19,511 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:53:19,511 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T04:53:19,512 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T04:53:19,512 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:53:19,513 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:53:19,513 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T04:53:19,515 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T04:53:19,515 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:53:19,516 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-19T04:53:19,516 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T04:53:19,517 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/hbase/meta/1588230740 2024-11-19T04:53:19,520 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/hbase/meta/1588230740 2024-11-19T04:53:19,522 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T04:53:19,522 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T04:53:19,523 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T04:53:19,526 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T04:53:19,528 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=700195, jitterRate=-0.1096561849117279}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T04:53:19,528 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T04:53:19,529 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731991999503Writing region info on filesystem at 1731991999504 (+1 ms)Initializing all the Stores at 1731991999505 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731991999505Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731991999505Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731991999505Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731991999505Cleaning up temporary data from old regions at 1731991999523 (+18 ms)Running coprocessor post-open hooks at 1731991999528 (+5 ms)Region opened successfully at 1731991999529 (+1 ms) 2024-11-19T04:53:19,536 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731991999450 2024-11-19T04:53:19,548 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T04:53:19,549 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T04:53:19,550 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=08a7f35e60d4,36677,1731991997423 2024-11-19T04:53:19,552 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 08a7f35e60d4,36677,1731991997423, state=OPEN 2024-11-19T04:53:19,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37341-0x1012e92708a0000, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T04:53:19,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36677-0x1012e92708a0001, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T04:53:19,558 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T04:53:19,558 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T04:53:19,558 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=08a7f35e60d4,36677,1731991997423 2024-11-19T04:53:19,564 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T04:53:19,564 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=08a7f35e60d4,36677,1731991997423 in 274 msec 2024-11-19T04:53:19,570 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T04:53:19,570 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 728 msec 2024-11-19T04:53:19,572 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T04:53:19,572 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T04:53:19,593 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T04:53:19,594 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=08a7f35e60d4,36677,1731991997423, seqNum=-1] 2024-11-19T04:53:19,615 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T04:53:19,617 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50535, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T04:53:19,647 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 976 msec 2024-11-19T04:53:19,647 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731991999647, completionTime=-1 2024-11-19T04:53:19,650 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T04:53:19,650 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-19T04:53:19,679 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-19T04:53:19,679 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731992059679 2024-11-19T04:53:19,679 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731992119679 2024-11-19T04:53:19,679 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 29 msec 2024-11-19T04:53:19,683 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,37341,1731991996736-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T04:53:19,683 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,37341,1731991996736-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:53:19,683 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,37341,1731991996736-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:53:19,685 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-08a7f35e60d4:37341, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T04:53:19,685 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T04:53:19,686 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T04:53:19,691 DEBUG [master/08a7f35e60d4:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T04:53:19,714 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.144sec 2024-11-19T04:53:19,715 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T04:53:19,716 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T04:53:19,717 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T04:53:19,718 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-19T04:53:19,718 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T04:53:19,719 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,37341,1731991996736-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T04:53:19,720 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,37341,1731991996736-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T04:53:19,728 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T04:53:19,729 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T04:53:19,730 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,37341,1731991996736-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T04:53:19,824 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b598c24, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T04:53:19,826 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-19T04:53:19,826 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-19T04:53:19,830 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 08a7f35e60d4,37341,-1 for getting cluster id 2024-11-19T04:53:19,833 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T04:53:19,842 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c1bcc81e-7fa7-42a1-aaa0-596013051a95' 2024-11-19T04:53:19,844 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T04:53:19,845 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c1bcc81e-7fa7-42a1-aaa0-596013051a95" 2024-11-19T04:53:19,845 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77c61835, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T04:53:19,845 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [08a7f35e60d4,37341,-1] 2024-11-19T04:53:19,848 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T04:53:19,849 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:53:19,851 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41588, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T04:53:19,854 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36e15a4d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T04:53:19,855 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T04:53:19,862 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=08a7f35e60d4,36677,1731991997423, seqNum=-1] 2024-11-19T04:53:19,862 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T04:53:19,865 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51148, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T04:53:19,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=08a7f35e60d4,37341,1731991996736 2024-11-19T04:53:19,886 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:53:19,894 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T04:53:19,897 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-19T04:53:19,902 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 08a7f35e60d4,37341,1731991996736 2024-11-19T04:53:19,905 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@23b4f284 2024-11-19T04:53:19,905 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-19T04:53:19,908 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41598, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-19T04:53:19,910 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37341 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-19T04:53:19,910 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37341 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-19T04:53:19,913 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37341 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T04:53:19,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37341 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-19T04:53:19,923 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T04:53:19,925 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37341 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-19T04:53:19,926 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:53:19,928 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T04:53:19,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37341 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T04:53:19,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741835_1011 (size=389) 2024-11-19T04:53:19,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741835_1011 (size=389) 2024-11-19T04:53:19,973 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 058b808fba8dfe3b6b9e5455cccaa2e8, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7 2024-11-19T04:53:19,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741836_1012 (size=72) 2024-11-19T04:53:19,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741836_1012 (size=72) 2024-11-19T04:53:19,983 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:53:19,984 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 058b808fba8dfe3b6b9e5455cccaa2e8, disabling compactions & flushes 2024-11-19T04:53:19,984 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8. 2024-11-19T04:53:19,984 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8. 2024-11-19T04:53:19,984 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8. after waiting 0 ms 2024-11-19T04:53:19,984 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8. 2024-11-19T04:53:19,984 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8. 2024-11-19T04:53:19,984 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 058b808fba8dfe3b6b9e5455cccaa2e8: Waiting for close lock at 1731991999984Disabling compacts and flushes for region at 1731991999984Disabling writes for close at 1731991999984Writing region close event to WAL at 1731991999984Closed at 1731991999984 2024-11-19T04:53:19,986 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T04:53:19,991 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731991999986"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731991999986"}]},"ts":"1731991999986"} 2024-11-19T04:53:19,997 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-19T04:53:19,998 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T04:53:20,001 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731991999999"}]},"ts":"1731991999999"} 2024-11-19T04:53:20,006 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-19T04:53:20,008 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=058b808fba8dfe3b6b9e5455cccaa2e8, ASSIGN}] 2024-11-19T04:53:20,010 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=058b808fba8dfe3b6b9e5455cccaa2e8, ASSIGN 2024-11-19T04:53:20,012 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=058b808fba8dfe3b6b9e5455cccaa2e8, ASSIGN; state=OFFLINE, location=08a7f35e60d4,36677,1731991997423; forceNewPlan=false, retain=false 2024-11-19T04:53:20,163 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=058b808fba8dfe3b6b9e5455cccaa2e8, regionState=OPENING, regionLocation=08a7f35e60d4,36677,1731991997423 2024-11-19T04:53:20,168 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=058b808fba8dfe3b6b9e5455cccaa2e8, ASSIGN because future has completed 2024-11-19T04:53:20,169 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 058b808fba8dfe3b6b9e5455cccaa2e8, server=08a7f35e60d4,36677,1731991997423}] 2024-11-19T04:53:20,330 INFO [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8. 
2024-11-19T04:53:20,330 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 058b808fba8dfe3b6b9e5455cccaa2e8, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8.', STARTKEY => '', ENDKEY => ''} 2024-11-19T04:53:20,330 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 058b808fba8dfe3b6b9e5455cccaa2e8 2024-11-19T04:53:20,331 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:53:20,331 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 058b808fba8dfe3b6b9e5455cccaa2e8 2024-11-19T04:53:20,331 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 058b808fba8dfe3b6b9e5455cccaa2e8 2024-11-19T04:53:20,333 INFO [StoreOpener-058b808fba8dfe3b6b9e5455cccaa2e8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 058b808fba8dfe3b6b9e5455cccaa2e8 2024-11-19T04:53:20,336 INFO [StoreOpener-058b808fba8dfe3b6b9e5455cccaa2e8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 058b808fba8dfe3b6b9e5455cccaa2e8 columnFamilyName info 2024-11-19T04:53:20,336 DEBUG [StoreOpener-058b808fba8dfe3b6b9e5455cccaa2e8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:53:20,337 INFO [StoreOpener-058b808fba8dfe3b6b9e5455cccaa2e8-1 {}] regionserver.HStore(327): Store=058b808fba8dfe3b6b9e5455cccaa2e8/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T04:53:20,338 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 058b808fba8dfe3b6b9e5455cccaa2e8 2024-11-19T04:53:20,339 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8 2024-11-19T04:53:20,339 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8 2024-11-19T04:53:20,340 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 058b808fba8dfe3b6b9e5455cccaa2e8 2024-11-19T04:53:20,340 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 058b808fba8dfe3b6b9e5455cccaa2e8 2024-11-19T04:53:20,342 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 058b808fba8dfe3b6b9e5455cccaa2e8 2024-11-19T04:53:20,346 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T04:53:20,346 INFO [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 058b808fba8dfe3b6b9e5455cccaa2e8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=772403, jitterRate=-0.01783984899520874}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T04:53:20,347 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 058b808fba8dfe3b6b9e5455cccaa2e8 2024-11-19T04:53:20,348 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 058b808fba8dfe3b6b9e5455cccaa2e8: Running coprocessor pre-open hook at 1731992000331Writing region info on filesystem at 1731992000331Initializing all the Stores at 1731992000333 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992000333Cleaning up temporary data from old regions at 1731992000340 (+7 ms)Running coprocessor post-open hooks at 1731992000347 (+7 ms)Region opened successfully at 1731992000348 (+1 ms) 2024-11-19T04:53:20,350 INFO [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8., pid=6, masterSystemTime=1731992000323 2024-11-19T04:53:20,354 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8. 2024-11-19T04:53:20,354 INFO [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8. 2024-11-19T04:53:20,355 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=058b808fba8dfe3b6b9e5455cccaa2e8, regionState=OPEN, openSeqNum=2, regionLocation=08a7f35e60d4,36677,1731991997423 2024-11-19T04:53:20,359 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 058b808fba8dfe3b6b9e5455cccaa2e8, server=08a7f35e60d4,36677,1731991997423 because future has completed 2024-11-19T04:53:20,366 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-19T04:53:20,366 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 058b808fba8dfe3b6b9e5455cccaa2e8, server=08a7f35e60d4,36677,1731991997423 in 193 msec 2024-11-19T04:53:20,370 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-19T04:53:20,370 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=058b808fba8dfe3b6b9e5455cccaa2e8, ASSIGN in 359 msec 2024-11-19T04:53:20,372 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T04:53:20,372 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731992000372"}]},"ts":"1731992000372"} 2024-11-19T04:53:20,375 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-19T04:53:20,377 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T04:53:20,380 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 461 msec 2024-11-19T04:53:25,007 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-19T04:53:25,058 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-19T04:53:25,059 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-19T04:53:27,147 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T04:53:27,147 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-19T04:53:27,149 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-19T04:53:27,149 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-19T04:53:27,150 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T04:53:27,150 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-19T04:53:27,150 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-19T04:53:27,150 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-19T04:53:29,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37341 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T04:53:29,989 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-19T04:53:29,992 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-19T04:53:29,998 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-19T04:53:29,999 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8. 
2024-11-19T04:53:30,000 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C36677%2C1731991997423.1731992010000 2024-11-19T04:53:30,010 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:53:30,010 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:53:30,010 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:53:30,010 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:53:30,010 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:53:30,011 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423/08a7f35e60d4%2C36677%2C1731991997423.1731991999080 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423/08a7f35e60d4%2C36677%2C1731991997423.1731992010000 2024-11-19T04:53:30,012 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44923:44923),(127.0.0.1/127.0.0.1:44957:44957)] 2024-11-19T04:53:30,012 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423/08a7f35e60d4%2C36677%2C1731991997423.1731991999080 is not closed yet, will try archiving it next time 2024-11-19T04:53:30,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741833_1009 (size=451) 2024-11-19T04:53:30,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741833_1009 (size=451) 2024-11-19T04:53:30,017 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423/08a7f35e60d4%2C36677%2C1731991997423.1731991999080 to hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/oldWALs/08a7f35e60d4%2C36677%2C1731991997423.1731991999080 2024-11-19T04:53:30,021 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8., hostname=08a7f35e60d4,36677,1731991997423, seqNum=2] 2024-11-19T04:53:42,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36677 {}] regionserver.HRegion(8855): Flush requested on 058b808fba8dfe3b6b9e5455cccaa2e8 2024-11-19T04:53:42,057 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 058b808fba8dfe3b6b9e5455cccaa2e8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T04:53:42,116 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/.tmp/info/b33ee37a9e0641869b46671a744b637e is 1080, key is row0001/info:/1731992010024/Put/seqid=0 2024-11-19T04:53:42,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741838_1014 (size=12509) 2024-11-19T04:53:42,129 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741838_1014 (size=12509) 2024-11-19T04:53:42,130 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/.tmp/info/b33ee37a9e0641869b46671a744b637e 2024-11-19T04:53:42,179 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/.tmp/info/b33ee37a9e0641869b46671a744b637e as hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/info/b33ee37a9e0641869b46671a744b637e 2024-11-19T04:53:42,189 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/info/b33ee37a9e0641869b46671a744b637e, entries=7, sequenceid=11, filesize=12.2 K 2024-11-19T04:53:42,196 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 058b808fba8dfe3b6b9e5455cccaa2e8 in 140ms, sequenceid=11, compaction requested=false 2024-11-19T04:53:42,197 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 058b808fba8dfe3b6b9e5455cccaa2e8: 2024-11-19T04:53:45,840 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-19T04:53:50,067 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C36677%2C1731991997423.1731992030066 2024-11-19T04:53:50,276 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38529,DS-cae856b5-a944-46bf-8d2a-259fe71de563,DISK], DatanodeInfoWithStorage[127.0.0.1:40499,DS-a6a75b9d-a9bc-4ec1-9c88-33ab7d803ded,DISK]] 2024-11-19T04:53:50,276 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:53:50,276 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:53:50,276 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:53:50,276 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:53:50,277 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:53:50,277 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423/08a7f35e60d4%2C36677%2C1731991997423.1731992010000 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423/08a7f35e60d4%2C36677%2C1731991997423.1731992030066 2024-11-19T04:53:50,278 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44923:44923),(127.0.0.1/127.0.0.1:44957:44957)] 2024-11-19T04:53:50,278 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423/08a7f35e60d4%2C36677%2C1731991997423.1731992010000 is not closed yet, will try archiving it next time 2024-11-19T04:53:50,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741837_1013 (size=12399) 2024-11-19T04:53:50,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741837_1013 (size=12399) 2024-11-19T04:53:50,482 INFO [FSHLog-0-hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7-prefix:08a7f35e60d4,36677,1731991997423 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38529,DS-cae856b5-a944-46bf-8d2a-259fe71de563,DISK], DatanodeInfoWithStorage[127.0.0.1:40499,DS-a6a75b9d-a9bc-4ec1-9c88-33ab7d803ded,DISK]] 2024-11-19T04:53:52,686 INFO [FSHLog-0-hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7-prefix:08a7f35e60d4,36677,1731991997423 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38529,DS-cae856b5-a944-46bf-8d2a-259fe71de563,DISK], DatanodeInfoWithStorage[127.0.0.1:40499,DS-a6a75b9d-a9bc-4ec1-9c88-33ab7d803ded,DISK]] 2024-11-19T04:53:54,890 INFO [FSHLog-0-hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7-prefix:08a7f35e60d4,36677,1731991997423 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38529,DS-cae856b5-a944-46bf-8d2a-259fe71de563,DISK], DatanodeInfoWithStorage[127.0.0.1:40499,DS-a6a75b9d-a9bc-4ec1-9c88-33ab7d803ded,DISK]] 2024-11-19T04:53:57,095 INFO [FSHLog-0-hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7-prefix:08a7f35e60d4,36677,1731991997423 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38529,DS-cae856b5-a944-46bf-8d2a-259fe71de563,DISK], DatanodeInfoWithStorage[127.0.0.1:40499,DS-a6a75b9d-a9bc-4ec1-9c88-33ab7d803ded,DISK]] 2024-11-19T04:53:57,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36677 {}] regionserver.HRegion(8855): Flush requested on 058b808fba8dfe3b6b9e5455cccaa2e8 2024-11-19T04:53:57,096 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 058b808fba8dfe3b6b9e5455cccaa2e8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T04:53:57,298 INFO [FSHLog-0-hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7-prefix:08a7f35e60d4,36677,1731991997423 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38529,DS-cae856b5-a944-46bf-8d2a-259fe71de563,DISK], DatanodeInfoWithStorage[127.0.0.1:40499,DS-a6a75b9d-a9bc-4ec1-9c88-33ab7d803ded,DISK]] 2024-11-19T04:53:57,303 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/.tmp/info/88aeb831f73e417dbfe19f94e5470267 is 1080, key is row0008/info:/1731992024055/Put/seqid=0 2024-11-19T04:53:57,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741840_1016 (size=12509) 2024-11-19T04:53:57,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741840_1016 (size=12509) 2024-11-19T04:53:57,312 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/.tmp/info/88aeb831f73e417dbfe19f94e5470267 2024-11-19T04:53:57,322 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/.tmp/info/88aeb831f73e417dbfe19f94e5470267 as hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/info/88aeb831f73e417dbfe19f94e5470267 2024-11-19T04:53:57,332 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/info/88aeb831f73e417dbfe19f94e5470267, entries=7, sequenceid=21, filesize=12.2 K 2024-11-19T04:53:57,534 INFO [FSHLog-0-hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7-prefix:08a7f35e60d4,36677,1731991997423 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38529,DS-cae856b5-a944-46bf-8d2a-259fe71de563,DISK], DatanodeInfoWithStorage[127.0.0.1:40499,DS-a6a75b9d-a9bc-4ec1-9c88-33ab7d803ded,DISK]] 2024-11-19T04:53:57,534 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 058b808fba8dfe3b6b9e5455cccaa2e8 in 
438ms, sequenceid=21, compaction requested=false 2024-11-19T04:53:57,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 058b808fba8dfe3b6b9e5455cccaa2e8: 2024-11-19T04:53:57,534 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-19T04:53:57,534 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T04:53:57,535 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/info/b33ee37a9e0641869b46671a744b637e because midkey is the same as first or last row 2024-11-19T04:53:59,299 INFO [FSHLog-0-hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7-prefix:08a7f35e60d4,36677,1731991997423 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38529,DS-cae856b5-a944-46bf-8d2a-259fe71de563,DISK], DatanodeInfoWithStorage[127.0.0.1:40499,DS-a6a75b9d-a9bc-4ec1-9c88-33ab7d803ded,DISK]] 2024-11-19T04:53:59,857 INFO [master/08a7f35e60d4:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-19T04:53:59,857 INFO [master/08a7f35e60d4:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-19T04:54:01,506 INFO [FSHLog-0-hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7-prefix:08a7f35e60d4,36677,1731991997423 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38529,DS-cae856b5-a944-46bf-8d2a-259fe71de563,DISK], DatanodeInfoWithStorage[127.0.0.1:40499,DS-a6a75b9d-a9bc-4ec1-9c88-33ab7d803ded,DISK]] 2024-11-19T04:54:01,508 WARN [FSHLog-0-hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7-prefix:08a7f35e60d4,36677,1731991997423 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38529,DS-cae856b5-a944-46bf-8d2a-259fe71de563,DISK], DatanodeInfoWithStorage[127.0.0.1:40499,DS-a6a75b9d-a9bc-4ec1-9c88-33ab7d803ded,DISK]] 2024-11-19T04:54:01,509 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 08a7f35e60d4%2C36677%2C1731991997423:(num 1731992030066) roll requested 2024-11-19T04:54:01,510 INFO [regionserver/08a7f35e60d4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C36677%2C1731991997423.1731992041510 2024-11-19T04:54:01,719 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38529,DS-cae856b5-a944-46bf-8d2a-259fe71de563,DISK], DatanodeInfoWithStorage[127.0.0.1:40499,DS-a6a75b9d-a9bc-4ec1-9c88-33ab7d803ded,DISK]] 2024-11-19T04:54:01,719 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:01,720 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:01,720 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:01,720 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:01,720 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
2024-11-19T04:54:01,720 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423/08a7f35e60d4%2C36677%2C1731991997423.1731992030066 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423/08a7f35e60d4%2C36677%2C1731991997423.1731992041510 2024-11-19T04:54:01,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741839_1015 (size=7739) 2024-11-19T04:54:01,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741839_1015 (size=7739) 2024-11-19T04:54:01,727 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44957:44957),(127.0.0.1/127.0.0.1:44923:44923)] 2024-11-19T04:54:01,727 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423/08a7f35e60d4%2C36677%2C1731991997423.1731992030066 is not closed yet, will try archiving it next time 2024-11-19T04:54:01,727 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423/08a7f35e60d4%2C36677%2C1731991997423.1731992010000 to hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/oldWALs/08a7f35e60d4%2C36677%2C1731991997423.1731992010000 2024-11-19T04:54:03,710 INFO [FSHLog-0-hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7-prefix:08a7f35e60d4,36677,1731991997423 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40499,DS-a6a75b9d-a9bc-4ec1-9c88-33ab7d803ded,DISK], DatanodeInfoWithStorage[127.0.0.1:38529,DS-cae856b5-a944-46bf-8d2a-259fe71de563,DISK]] 2024-11-19T04:54:05,331 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 058b808fba8dfe3b6b9e5455cccaa2e8, had cached 0 bytes from a total of 25018 2024-11-19T04:54:05,914 INFO [FSHLog-0-hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7-prefix:08a7f35e60d4,36677,1731991997423 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40499,DS-a6a75b9d-a9bc-4ec1-9c88-33ab7d803ded,DISK], DatanodeInfoWithStorage[127.0.0.1:38529,DS-cae856b5-a944-46bf-8d2a-259fe71de563,DISK]] 2024-11-19T04:54:08,118 INFO [FSHLog-0-hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7-prefix:08a7f35e60d4,36677,1731991997423 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40499,DS-a6a75b9d-a9bc-4ec1-9c88-33ab7d803ded,DISK], DatanodeInfoWithStorage[127.0.0.1:38529,DS-cae856b5-a944-46bf-8d2a-259fe71de563,DISK]] 2024-11-19T04:54:10,322 INFO [FSHLog-0-hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7-prefix:08a7f35e60d4,36677,1731991997423 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40499,DS-a6a75b9d-a9bc-4ec1-9c88-33ab7d803ded,DISK], 
DatanodeInfoWithStorage[127.0.0.1:38529,DS-cae856b5-a944-46bf-8d2a-259fe71de563,DISK]] 2024-11-19T04:54:12,325 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-19T04:54:12,325 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C36677%2C1731991997423.1731992052325 2024-11-19T04:54:15,840 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-19T04:54:17,338 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40499,DS-a6a75b9d-a9bc-4ec1-9c88-33ab7d803ded,DISK], DatanodeInfoWithStorage[127.0.0.1:38529,DS-cae856b5-a944-46bf-8d2a-259fe71de563,DISK]] 2024-11-19T04:54:17,340 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40499,DS-a6a75b9d-a9bc-4ec1-9c88-33ab7d803ded,DISK], DatanodeInfoWithStorage[127.0.0.1:38529,DS-cae856b5-a944-46bf-8d2a-259fe71de563,DISK]] 2024-11-19T04:54:17,340 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 08a7f35e60d4%2C36677%2C1731991997423:(num 1731992052325) roll requested 2024-11-19T04:54:17,340 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:17,341 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:17,341 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:17,341 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:17,341 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:17,341 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423/08a7f35e60d4%2C36677%2C1731991997423.1731992041510 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423/08a7f35e60d4%2C36677%2C1731991997423.1731992052325 2024-11-19T04:54:17,342 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44923:44923),(127.0.0.1/127.0.0.1:44957:44957)] 2024-11-19T04:54:17,343 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423/08a7f35e60d4%2C36677%2C1731991997423.1731992041510 is not closed yet, will try archiving it next time 2024-11-19T04:54:17,343 INFO [regionserver/08a7f35e60d4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C36677%2C1731991997423.1731992057343 2024-11-19T04:54:17,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741841_1017 (size=4753) 2024-11-19T04:54:17,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741841_1017 (size=4753) 2024-11-19T04:54:22,346 INFO [FSHLog-0-hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7-prefix:08a7f35e60d4,36677,1731991997423 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:38529,DS-cae856b5-a944-46bf-8d2a-259fe71de563,DISK], DatanodeInfoWithStorage[127.0.0.1:40499,DS-a6a75b9d-a9bc-4ec1-9c88-33ab7d803ded,DISK]] 2024-11-19T04:54:22,346 WARN [FSHLog-0-hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7-prefix:08a7f35e60d4,36677,1731991997423 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38529,DS-cae856b5-a944-46bf-8d2a-259fe71de563,DISK], DatanodeInfoWithStorage[127.0.0.1:40499,DS-a6a75b9d-a9bc-4ec1-9c88-33ab7d803ded,DISK]] 2024-11-19T04:54:22,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36677 {}] regionserver.HRegion(8855): Flush requested on 058b808fba8dfe3b6b9e5455cccaa2e8 2024-11-19T04:54:22,347 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 058b808fba8dfe3b6b9e5455cccaa2e8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T04:54:22,354 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38529,DS-cae856b5-a944-46bf-8d2a-259fe71de563,DISK], DatanodeInfoWithStorage[127.0.0.1:40499,DS-a6a75b9d-a9bc-4ec1-9c88-33ab7d803ded,DISK]] 2024-11-19T04:54:22,354 WARN [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38529,DS-cae856b5-a944-46bf-8d2a-259fe71de563,DISK], DatanodeInfoWithStorage[127.0.0.1:40499,DS-a6a75b9d-a9bc-4ec1-9c88-33ab7d803ded,DISK]] 2024-11-19T04:54:24,347 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-19T04:54:27,349 INFO [FSHLog-0-hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7-prefix:08a7f35e60d4,36677,1731991997423 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38529,DS-cae856b5-a944-46bf-8d2a-259fe71de563,DISK], DatanodeInfoWithStorage[127.0.0.1:40499,DS-a6a75b9d-a9bc-4ec1-9c88-33ab7d803ded,DISK]] 2024-11-19T04:54:27,349 WARN [FSHLog-0-hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7-prefix:08a7f35e60d4,36677,1731991997423 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38529,DS-cae856b5-a944-46bf-8d2a-259fe71de563,DISK], DatanodeInfoWithStorage[127.0.0.1:40499,DS-a6a75b9d-a9bc-4ec1-9c88-33ab7d803ded,DISK]] 2024-11-19T04:54:27,350 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:27,350 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:27,350 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:27,350 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:27,350 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:27,351 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423/08a7f35e60d4%2C36677%2C1731991997423.1731992052325 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423/08a7f35e60d4%2C36677%2C1731991997423.1731992057343 2024-11-19T04:54:27,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741842_1018 (size=1569) 2024-11-19T04:54:27,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741842_1018 (size=1569) 2024-11-19T04:54:27,363 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/.tmp/info/e6f848f7ac6447b6be1d74e2a54356c7 is 1080, key is row0015/info:/1731992039098/Put/seqid=0 2024-11-19T04:54:27,366 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44957:44957),(127.0.0.1/127.0.0.1:44923:44923)] 2024-11-19T04:54:27,366 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 08a7f35e60d4%2C36677%2C1731991997423:(num 1731992057343) roll requested 2024-11-19T04:54:27,366 INFO [regionserver/08a7f35e60d4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C36677%2C1731991997423.1731992067366 2024-11-19T04:54:27,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741844_1020 (size=12509) 2024-11-19T04:54:27,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741844_1020 (size=12509) 2024-11-19T04:54:27,385 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/.tmp/info/e6f848f7ac6447b6be1d74e2a54356c7 2024-11-19T04:54:27,396 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/.tmp/info/e6f848f7ac6447b6be1d74e2a54356c7 as hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/info/e6f848f7ac6447b6be1d74e2a54356c7 2024-11-19T04:54:27,405 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/info/e6f848f7ac6447b6be1d74e2a54356c7, entries=7, sequenceid=31, filesize=12.2 K 2024-11-19T04:54:32,379 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40499,DS-a6a75b9d-a9bc-4ec1-9c88-33ab7d803ded,DISK], DatanodeInfoWithStorage[127.0.0.1:38529,DS-cae856b5-a944-46bf-8d2a-259fe71de563,DISK]] 2024-11-19T04:54:32,380 WARN [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40499,DS-a6a75b9d-a9bc-4ec1-9c88-33ab7d803ded,DISK], DatanodeInfoWithStorage[127.0.0.1:38529,DS-cae856b5-a944-46bf-8d2a-259fe71de563,DISK]] 2024-11-19T04:54:32,407 INFO [FSHLog-0-hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7-prefix:08a7f35e60d4,36677,1731991997423 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40499,DS-a6a75b9d-a9bc-4ec1-9c88-33ab7d803ded,DISK], DatanodeInfoWithStorage[127.0.0.1:38529,DS-cae856b5-a944-46bf-8d2a-259fe71de563,DISK]] 2024-11-19T04:54:32,407 WARN [FSHLog-0-hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7-prefix:08a7f35e60d4,36677,1731991997423 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40499,DS-a6a75b9d-a9bc-4ec1-9c88-33ab7d803ded,DISK], DatanodeInfoWithStorage[127.0.0.1:38529,DS-cae856b5-a944-46bf-8d2a-259fe71de563,DISK]] 2024-11-19T04:54:32,407 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 058b808fba8dfe3b6b9e5455cccaa2e8 in 10060ms, sequenceid=31, compaction requested=true 2024-11-19T04:54:32,408 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 058b808fba8dfe3b6b9e5455cccaa2e8: 2024-11-19T04:54:32,408 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:32,408 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:32,408 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-19T04:54:32,408 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T04:54:32,408 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:32,408 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:32,408 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/info/b33ee37a9e0641869b46671a744b637e because midkey is the same as first or last row 2024-11-19T04:54:32,408 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:32,408 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423/08a7f35e60d4%2C36677%2C1731991997423.1731992057343 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423/08a7f35e60d4%2C36677%2C1731991997423.1731992067366 2024-11-19T04:54:32,409 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44923:44923),(127.0.0.1/127.0.0.1:44957:44957)] 2024-11-19T04:54:32,409 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423/08a7f35e60d4%2C36677%2C1731991997423.1731992057343 is not closed yet, will try archiving it next time 2024-11-19T04:54:32,410 INFO 
[WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423/08a7f35e60d4%2C36677%2C1731991997423.1731992030066 to hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/oldWALs/08a7f35e60d4%2C36677%2C1731991997423.1731992030066 2024-11-19T04:54:32,410 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 058b808fba8dfe3b6b9e5455cccaa2e8:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T04:54:32,410 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 08a7f35e60d4%2C36677%2C1731991997423:(num 1731992072410) roll requested 2024-11-19T04:54:32,410 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C36677%2C1731991997423.1731992072410 2024-11-19T04:54:32,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741843_1019 (size=438) 2024-11-19T04:54:32,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741843_1019 (size=438) 2024-11-19T04:54:32,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T04:54:32,412 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423/08a7f35e60d4%2C36677%2C1731991997423.1731992041510 to hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/oldWALs/08a7f35e60d4%2C36677%2C1731991997423.1731992041510 2024-11-19T04:54:32,414 DEBUG [RS:0;08a7f35e60d4:36677-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T04:54:32,414 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423/08a7f35e60d4%2C36677%2C1731991997423.1731992052325 to hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/oldWALs/08a7f35e60d4%2C36677%2C1731991997423.1731992052325 2024-11-19T04:54:32,416 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423/08a7f35e60d4%2C36677%2C1731991997423.1731992057343 to hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/oldWALs/08a7f35e60d4%2C36677%2C1731991997423.1731992057343 2024-11-19T04:54:32,417 DEBUG [RS:0;08a7f35e60d4:36677-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T04:54:32,419 DEBUG [RS:0;08a7f35e60d4:36677-shortCompactions-0 {}] regionserver.HStore(1541): 058b808fba8dfe3b6b9e5455cccaa2e8/info is initiating minor compaction (all files) 2024-11-19T04:54:32,419 INFO [RS:0;08a7f35e60d4:36677-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 058b808fba8dfe3b6b9e5455cccaa2e8/info in 
TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8. 2024-11-19T04:54:32,420 INFO [RS:0;08a7f35e60d4:36677-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/info/b33ee37a9e0641869b46671a744b637e, hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/info/88aeb831f73e417dbfe19f94e5470267, hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/info/e6f848f7ac6447b6be1d74e2a54356c7] into tmpdir=hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/.tmp, totalSize=36.6 K 2024-11-19T04:54:32,421 DEBUG [RS:0;08a7f35e60d4:36677-shortCompactions-0 {}] compactions.Compactor(225): Compacting b33ee37a9e0641869b46671a744b637e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731992010024 2024-11-19T04:54:32,422 DEBUG [RS:0;08a7f35e60d4:36677-shortCompactions-0 {}] compactions.Compactor(225): Compacting 88aeb831f73e417dbfe19f94e5470267, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731992024055 2024-11-19T04:54:32,423 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:32,423 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:32,423 DEBUG [RS:0;08a7f35e60d4:36677-shortCompactions-0 {}] compactions.Compactor(225): Compacting e6f848f7ac6447b6be1d74e2a54356c7, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731992039098 2024-11-19T04:54:32,423 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:32,423 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:32,423 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:32,423 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423/08a7f35e60d4%2C36677%2C1731991997423.1731992067366 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423/08a7f35e60d4%2C36677%2C1731991997423.1731992072410 2024-11-19T04:54:32,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741845_1021 (size=93) 2024-11-19T04:54:32,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741845_1021 (size=93) 2024-11-19T04:54:32,434 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44923:44923),(127.0.0.1/127.0.0.1:44957:44957)] 2024-11-19T04:54:32,434 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423/08a7f35e60d4%2C36677%2C1731991997423.1731992067366 is not closed yet, will try archiving it next time 2024-11-19T04:54:32,435 INFO 
[regionserver/08a7f35e60d4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C36677%2C1731991997423.1731992072434 2024-11-19T04:54:32,445 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:32,445 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:32,445 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:32,445 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:32,446 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:32,446 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423/08a7f35e60d4%2C36677%2C1731991997423.1731992072410 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423/08a7f35e60d4%2C36677%2C1731991997423.1731992072434 2024-11-19T04:54:32,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741846_1022 (size=1258) 2024-11-19T04:54:32,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741846_1022 (size=1258) 2024-11-19T04:54:32,450 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423/08a7f35e60d4%2C36677%2C1731991997423.1731992067366 is not closed yet, will try archiving it next time 2024-11-19T04:54:32,457 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44923:44923),(127.0.0.1/127.0.0.1:44957:44957)] 2024-11-19T04:54:32,457 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423/08a7f35e60d4%2C36677%2C1731991997423.1731992067366 is not closed yet, will try archiving it next time 2024-11-19T04:54:32,462 INFO [RS:0;08a7f35e60d4:36677-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 058b808fba8dfe3b6b9e5455cccaa2e8#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T04:54:32,463 DEBUG [RS:0;08a7f35e60d4:36677-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/.tmp/info/c2420ab1672e4867bb11aace70ababc0 is 1080, key is row0001/info:/1731992010024/Put/seqid=0 2024-11-19T04:54:32,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741848_1024 (size=27710) 2024-11-19T04:54:32,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741848_1024 (size=27710) 2024-11-19T04:54:32,482 DEBUG [RS:0;08a7f35e60d4:36677-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/.tmp/info/c2420ab1672e4867bb11aace70ababc0 as hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/info/c2420ab1672e4867bb11aace70ababc0 2024-11-19T04:54:32,504 INFO [RS:0;08a7f35e60d4:36677-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 058b808fba8dfe3b6b9e5455cccaa2e8/info of 058b808fba8dfe3b6b9e5455cccaa2e8 into c2420ab1672e4867bb11aace70ababc0(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T04:54:32,504 DEBUG [RS:0;08a7f35e60d4:36677-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 058b808fba8dfe3b6b9e5455cccaa2e8: 2024-11-19T04:54:32,506 INFO [RS:0;08a7f35e60d4:36677-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8., storeName=058b808fba8dfe3b6b9e5455cccaa2e8/info, priority=13, startTime=1731992072409; duration=0sec 2024-11-19T04:54:32,507 DEBUG [RS:0;08a7f35e60d4:36677-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-19T04:54:32,507 DEBUG [RS:0;08a7f35e60d4:36677-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T04:54:32,507 DEBUG [RS:0;08a7f35e60d4:36677-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/info/c2420ab1672e4867bb11aace70ababc0 because midkey is the same as first or last row 2024-11-19T04:54:32,507 DEBUG [RS:0;08a7f35e60d4:36677-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-19T04:54:32,507 DEBUG [RS:0;08a7f35e60d4:36677-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T04:54:32,507 DEBUG [RS:0;08a7f35e60d4:36677-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/info/c2420ab1672e4867bb11aace70ababc0 because midkey is the same as first or last row 2024-11-19T04:54:32,508 DEBUG [RS:0;08a7f35e60d4:36677-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-19T04:54:32,508 DEBUG [RS:0;08a7f35e60d4:36677-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T04:54:32,508 DEBUG [RS:0;08a7f35e60d4:36677-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/info/c2420ab1672e4867bb11aace70ababc0 because midkey is the same as first or last row 2024-11-19T04:54:32,508 DEBUG [RS:0;08a7f35e60d4:36677-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T04:54:32,508 DEBUG [RS:0;08a7f35e60d4:36677-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 058b808fba8dfe3b6b9e5455cccaa2e8:info 2024-11-19T04:54:32,828 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/WALs/08a7f35e60d4,36677,1731991997423/08a7f35e60d4%2C36677%2C1731991997423.1731992067366 to hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/oldWALs/08a7f35e60d4%2C36677%2C1731991997423.1731992067366 2024-11-19T04:54:44,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36677 {}] regionserver.HRegion(8855): Flush requested on 058b808fba8dfe3b6b9e5455cccaa2e8 2024-11-19T04:54:44,460 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 058b808fba8dfe3b6b9e5455cccaa2e8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T04:54:44,466 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/.tmp/info/b351b873103e4f0e8f6bb144b39b9a9d is 1080, key is row0022/info:/1731992072436/Put/seqid=0 2024-11-19T04:54:44,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741849_1025 (size=12509) 2024-11-19T04:54:44,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741849_1025 (size=12509) 2024-11-19T04:54:44,474 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/.tmp/info/b351b873103e4f0e8f6bb144b39b9a9d 2024-11-19T04:54:44,483 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/.tmp/info/b351b873103e4f0e8f6bb144b39b9a9d as hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/info/b351b873103e4f0e8f6bb144b39b9a9d 2024-11-19T04:54:44,491 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/info/b351b873103e4f0e8f6bb144b39b9a9d, entries=7, sequenceid=42, filesize=12.2 K 2024-11-19T04:54:44,493 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 058b808fba8dfe3b6b9e5455cccaa2e8 in 34ms, sequenceid=42, compaction requested=false 2024-11-19T04:54:44,493 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 058b808fba8dfe3b6b9e5455cccaa2e8: 2024-11-19T04:54:44,494 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-19T04:54:44,494 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T04:54:44,494 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/info/c2420ab1672e4867bb11aace70ababc0 because midkey is the same as first or last row 2024-11-19T04:54:45,840 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-19T04:54:50,331 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 058b808fba8dfe3b6b9e5455cccaa2e8, had cached 0 bytes from a total of 40219 2024-11-19T04:54:52,471 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T04:54:52,472 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-19T04:54:52,472 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T04:54:52,477 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:54:52,478 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:54:52,478 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-19T04:54:52,478 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T04:54:52,478 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=461911212, stopped=false 2024-11-19T04:54:52,478 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=08a7f35e60d4,37341,1731991996736 2024-11-19T04:54:52,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36677-0x1012e92708a0001, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T04:54:52,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37341-0x1012e92708a0000, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T04:54:52,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37341-0x1012e92708a0000, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:54:52,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36677-0x1012e92708a0001, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:54:52,481 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T04:54:52,481 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37341-0x1012e92708a0000, quorum=127.0.0.1:59995, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T04:54:52,481 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36677-0x1012e92708a0001, quorum=127.0.0.1:59995, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T04:54:52,481 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-19T04:54:52,481 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T04:54:52,481 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:54:52,482 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '08a7f35e60d4,36677,1731991997423' ***** 2024-11-19T04:54:52,482 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T04:54:52,482 INFO [RS:0;08a7f35e60d4:36677 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T04:54:52,482 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T04:54:52,482 INFO [RS:0;08a7f35e60d4:36677 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T04:54:52,483 INFO [RS:0;08a7f35e60d4:36677 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T04:54:52,483 INFO [RS:0;08a7f35e60d4:36677 {}] regionserver.HRegionServer(3091): Received CLOSE for 058b808fba8dfe3b6b9e5455cccaa2e8 2024-11-19T04:54:52,483 INFO [RS:0;08a7f35e60d4:36677 {}] regionserver.HRegionServer(959): stopping server 08a7f35e60d4,36677,1731991997423 2024-11-19T04:54:52,483 INFO [RS:0;08a7f35e60d4:36677 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T04:54:52,483 INFO [RS:0;08a7f35e60d4:36677 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;08a7f35e60d4:36677. 2024-11-19T04:54:52,483 DEBUG [RS:0;08a7f35e60d4:36677 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T04:54:52,483 DEBUG [RS:0;08a7f35e60d4:36677 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:54:52,484 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 058b808fba8dfe3b6b9e5455cccaa2e8, disabling compactions & flushes 2024-11-19T04:54:52,484 INFO [RS:0;08a7f35e60d4:36677 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T04:54:52,484 INFO [RS:0;08a7f35e60d4:36677 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T04:54:52,484 INFO [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8. 
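Editor's note: the call stacks logged above (once for the test's connection, once for the region server's) show where this whole shutdown originates: AbstractTestLogRolling.tearDown calls HBaseTestingUtil.shutdownMiniCluster from a JUnit @After method, which walks down through SingleProcessHBaseCluster, LocalHBaseCluster and JVMClusterUtil to stop the master and region server. A minimal sketch of that test-lifecycle shape follows; HBaseTestingUtil and its start/shutdown methods appear in the logged stack trace, everything else (class name, test body) is a placeholder, and the real AbstractTestLogRolling does considerably more setup.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    /** Illustrative test skeleton: the teardown path matches the stack trace above. */
    public class MiniClusterLifecycleSketch {

        // HBaseTestingUtil is the class named in the logged call stack.
        private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

        @Before
        public void setUp() throws Exception {
            // Starts an in-process master, region server, ZooKeeper and HDFS.
            testUtil.startMiniCluster();
        }

        @Test
        public void testSomethingAgainstTheCluster() throws Exception {
            // a real test would use testUtil.getConnection() / getAdmin() here
        }

        @After
        public void tearDown() throws Exception {
            // The call visible in the log's stack trace:
            // HBaseTestingUtil.shutdownMiniCluster -> SingleProcessHBaseCluster.shutdown
            // -> LocalHBaseCluster.shutdown -> JVMClusterUtil.shutdown(...)
            testUtil.shutdownMiniCluster();
        }
    }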
2024-11-19T04:54:52,484 INFO [RS:0;08a7f35e60d4:36677 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-19T04:54:52,484 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8. 2024-11-19T04:54:52,484 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8. after waiting 0 ms 2024-11-19T04:54:52,484 INFO [RS:0;08a7f35e60d4:36677 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T04:54:52,484 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8. 2024-11-19T04:54:52,484 INFO [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 058b808fba8dfe3b6b9e5455cccaa2e8 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-19T04:54:52,484 INFO [RS:0;08a7f35e60d4:36677 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-19T04:54:52,484 DEBUG [RS:0;08a7f35e60d4:36677 {}] regionserver.HRegionServer(1325): Online Regions={058b808fba8dfe3b6b9e5455cccaa2e8=TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8., 1588230740=hbase:meta,,1.1588230740} 2024-11-19T04:54:52,484 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T04:54:52,484 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T04:54:52,484 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T04:54:52,485 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T04:54:52,485 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T04:54:52,485 DEBUG [RS:0;08a7f35e60d4:36677 {}] regionserver.HRegionServer(1351): Waiting on 058b808fba8dfe3b6b9e5455cccaa2e8, 1588230740 2024-11-19T04:54:52,485 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-19T04:54:52,490 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/.tmp/info/999a2ce4cb054bd6a2474831155d24f1 is 1080, key is row0029/info:/1731992086462/Put/seqid=0 2024-11-19T04:54:52,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741850_1026 
(size=8193) 2024-11-19T04:54:52,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741850_1026 (size=8193) 2024-11-19T04:54:52,501 INFO [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/.tmp/info/999a2ce4cb054bd6a2474831155d24f1 2024-11-19T04:54:52,510 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/.tmp/info/999a2ce4cb054bd6a2474831155d24f1 as hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/info/999a2ce4cb054bd6a2474831155d24f1 2024-11-19T04:54:52,510 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/hbase/meta/1588230740/.tmp/info/736c2cc3bc224c24bca01229f4b3b3ac is 195, key is TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8./info:regioninfo/1731992000355/Put/seqid=0 2024-11-19T04:54:52,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741851_1027 (size=7016) 2024-11-19T04:54:52,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741851_1027 (size=7016) 2024-11-19T04:54:52,518 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/hbase/meta/1588230740/.tmp/info/736c2cc3bc224c24bca01229f4b3b3ac 2024-11-19T04:54:52,519 INFO [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/info/999a2ce4cb054bd6a2474831155d24f1, entries=3, sequenceid=48, filesize=8.0 K 2024-11-19T04:54:52,520 INFO [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 058b808fba8dfe3b6b9e5455cccaa2e8 in 36ms, sequenceid=48, compaction requested=true 2024-11-19T04:54:52,521 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/info/b33ee37a9e0641869b46671a744b637e, 
hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/info/88aeb831f73e417dbfe19f94e5470267, hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/info/e6f848f7ac6447b6be1d74e2a54356c7] to archive 2024-11-19T04:54:52,524 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-19T04:54:52,527 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/info/b33ee37a9e0641869b46671a744b637e to hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/archive/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/info/b33ee37a9e0641869b46671a744b637e 2024-11-19T04:54:52,529 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/info/88aeb831f73e417dbfe19f94e5470267 to hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/archive/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/info/88aeb831f73e417dbfe19f94e5470267 2024-11-19T04:54:52,531 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/info/e6f848f7ac6447b6be1d74e2a54356c7 to hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/archive/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/info/e6f848f7ac6447b6be1d74e2a54356c7 2024-11-19T04:54:52,546 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/hbase/meta/1588230740/.tmp/ns/a4f24ee9eeb6486282af80ee4bcb6b87 is 43, key is default/ns:d/1731991999622/Put/seqid=0 2024-11-19T04:54:52,543 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=08a7f35e60d4:37341 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-19T04:54:52,548 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [b33ee37a9e0641869b46671a744b637e=12509, 88aeb831f73e417dbfe19f94e5470267=12509, e6f848f7ac6447b6be1d74e2a54356c7=12509] 2024-11-19T04:54:52,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741852_1028 (size=5153) 2024-11-19T04:54:52,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741852_1028 (size=5153) 2024-11-19T04:54:52,555 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/hbase/meta/1588230740/.tmp/ns/a4f24ee9eeb6486282af80ee4bcb6b87 2024-11-19T04:54:52,555 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/default/TestLogRolling-testSlowSyncLogRolling/058b808fba8dfe3b6b9e5455cccaa2e8/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-19T04:54:52,558 INFO [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8. 2024-11-19T04:54:52,558 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 058b808fba8dfe3b6b9e5455cccaa2e8: Waiting for close lock at 1731992092483Running coprocessor pre-close hooks at 1731992092484 (+1 ms)Disabling compacts and flushes for region at 1731992092484Disabling writes for close at 1731992092484Obtaining lock to block concurrent updates at 1731992092484Preparing flush snapshotting stores in 058b808fba8dfe3b6b9e5455cccaa2e8 at 1731992092484Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731992092484Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8. at 1731992092485 (+1 ms)Flushing 058b808fba8dfe3b6b9e5455cccaa2e8/info: creating writer at 1731992092486 (+1 ms)Flushing 058b808fba8dfe3b6b9e5455cccaa2e8/info: appending metadata at 1731992092490 (+4 ms)Flushing 058b808fba8dfe3b6b9e5455cccaa2e8/info: closing flushed file at 1731992092490Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42d657c7: reopening flushed file at 1731992092509 (+19 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 058b808fba8dfe3b6b9e5455cccaa2e8 in 36ms, sequenceid=48, compaction requested=true at 1731992092520 (+11 ms)Writing region close event to WAL at 1731992092549 (+29 ms)Running coprocessor post-close hooks at 1731992092556 (+7 ms)Closed at 1731992092558 (+2 ms) 2024-11-19T04:54:52,558 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731991999909.058b808fba8dfe3b6b9e5455cccaa2e8. 
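Editor's note: the entries above capture the close-time flush sequence for the test region: the memstore is written to an HFile under the store's .tmp directory, the file is committed by moving it into the info/ family directory, and the superseded compacted files are relocated under the archive/ tree (the report of that archival to the master fails harmlessly with StoppedRpcClientException because the RPC client is already stopped). The sketch below reproduces only the "write to .tmp, rename into place, archive the old files" mechanics with plain Hadoop FileSystem calls; the directory layout mirrors the log, but this is a simplified illustration, not the HStore/HFileArchiver implementation.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    import java.io.IOException;

    /** Illustrative only: tmp-write / rename-commit / archive, as seen in the log. */
    public class FlushCommitSketch {

        public static void main(String[] args) throws IOException {
            Configuration conf = new Configuration();
            // fs.defaultFS would point at hdfs://localhost:35757 in the test above;
            // with no configuration this runs against the local filesystem.
            FileSystem fs = FileSystem.get(conf);

            Path base       = new Path("target/flush-sketch/region");
            Path storeDir   = new Path(base, "info");
            Path tmpDir     = new Path(base, ".tmp/info");
            Path archiveDir = new Path("target/flush-sketch/archive/region/info");

            // 1. Write the flushed data to a temporary file first.
            Path tmpFile = new Path(tmpDir, "flushed-file");
            try (FSDataOutputStream out = fs.create(tmpFile, true)) {
                out.writeUTF("placeholder for flushed cells");
            }

            // 2. Commit: a rename from .tmp into the live store directory, the step
            //    logged as "Committing ... .tmp/info/... as ... info/...".
            fs.mkdirs(storeDir);
            Path committed = new Path(storeDir, tmpFile.getName());
            if (!fs.rename(tmpFile, committed)) {
                throw new IOException("commit rename failed for " + tmpFile);
            }

            // 3. Archive superseded files instead of deleting them, the step logged
            //    as "Archived from FileableStoreFile ... to ... archive/...".
            fs.mkdirs(archiveDir);
            Path oldFile = new Path(storeDir, "old-compacted-file");
            if (fs.exists(oldFile)) {
                fs.rename(oldFile, new Path(archiveDir, oldFile.getName()));
            }
        }
    }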
2024-11-19T04:54:52,579 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/hbase/meta/1588230740/.tmp/table/b971e24837a74173a6a053156848f717 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731992000372/Put/seqid=0 2024-11-19T04:54:52,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741853_1029 (size=5396) 2024-11-19T04:54:52,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741853_1029 (size=5396) 2024-11-19T04:54:52,587 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/hbase/meta/1588230740/.tmp/table/b971e24837a74173a6a053156848f717 2024-11-19T04:54:52,595 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/hbase/meta/1588230740/.tmp/info/736c2cc3bc224c24bca01229f4b3b3ac as hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/hbase/meta/1588230740/info/736c2cc3bc224c24bca01229f4b3b3ac 2024-11-19T04:54:52,603 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/hbase/meta/1588230740/info/736c2cc3bc224c24bca01229f4b3b3ac, entries=10, sequenceid=11, filesize=6.9 K 2024-11-19T04:54:52,605 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/hbase/meta/1588230740/.tmp/ns/a4f24ee9eeb6486282af80ee4bcb6b87 as hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/hbase/meta/1588230740/ns/a4f24ee9eeb6486282af80ee4bcb6b87 2024-11-19T04:54:52,614 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/hbase/meta/1588230740/ns/a4f24ee9eeb6486282af80ee4bcb6b87, entries=2, sequenceid=11, filesize=5.0 K 2024-11-19T04:54:52,615 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/hbase/meta/1588230740/.tmp/table/b971e24837a74173a6a053156848f717 as hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/hbase/meta/1588230740/table/b971e24837a74173a6a053156848f717 2024-11-19T04:54:52,623 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/hbase/meta/1588230740/table/b971e24837a74173a6a053156848f717, entries=2, sequenceid=11, filesize=5.3 K 2024-11-19T04:54:52,625 INFO 
[RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 139ms, sequenceid=11, compaction requested=false 2024-11-19T04:54:52,630 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-19T04:54:52,631 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T04:54:52,631 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T04:54:52,631 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731992092484Running coprocessor pre-close hooks at 1731992092484Disabling compacts and flushes for region at 1731992092484Disabling writes for close at 1731992092485 (+1 ms)Obtaining lock to block concurrent updates at 1731992092485Preparing flush snapshotting stores in 1588230740 at 1731992092485Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731992092485Flushing stores of hbase:meta,,1.1588230740 at 1731992092486 (+1 ms)Flushing 1588230740/info: creating writer at 1731992092486Flushing 1588230740/info: appending metadata at 1731992092509 (+23 ms)Flushing 1588230740/info: closing flushed file at 1731992092509Flushing 1588230740/ns: creating writer at 1731992092527 (+18 ms)Flushing 1588230740/ns: appending metadata at 1731992092546 (+19 ms)Flushing 1588230740/ns: closing flushed file at 1731992092546Flushing 1588230740/table: creating writer at 1731992092563 (+17 ms)Flushing 1588230740/table: appending metadata at 1731992092579 (+16 ms)Flushing 1588230740/table: closing flushed file at 1731992092579Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4563030e: reopening flushed file at 1731992092594 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5e190cfc: reopening flushed file at 1731992092604 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7b7b4852: reopening flushed file at 1731992092614 (+10 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 139ms, sequenceid=11, compaction requested=false at 1731992092625 (+11 ms)Writing region close event to WAL at 1731992092626 (+1 ms)Running coprocessor post-close hooks at 1731992092631 (+5 ms)Closed at 1731992092631 2024-11-19T04:54:52,631 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-19T04:54:52,685 INFO [RS:0;08a7f35e60d4:36677 {}] regionserver.HRegionServer(976): stopping server 08a7f35e60d4,36677,1731991997423; all regions closed. 
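Editor's note: the meta flush above persists exactly the bookkeeping a client later reads back: info:regioninfo for the test region's row, the ns (namespace) entry, and table:state for TestLogRolling-testSlowSyncLogRolling. A hedged sketch of looking at those cells through the standard client API follows; it assumes a running cluster reachable from the default configuration and deliberately keeps the output handling crude.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;

    /** Illustrative only: read back the hbase:meta cells that the flush above persisted. */
    public class MetaScanSketch {

        public static void main(String[] args) throws Exception {
            // Assumes hbase-site.xml (or defaults) pointing at a live cluster.
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Table meta = conn.getTable(TableName.META_TABLE_NAME);
                 ResultScanner scanner = meta.getScanner(new Scan())) {
                for (Result row : scanner) {
                    // Region rows carry info:regioninfo; table rows carry table:state --
                    // the same columns flushed from 1588230740 in the log above.
                    System.out.println(row);
                }
            }
        }
    }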
2024-11-19T04:54:52,687 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:52,687 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:52,687 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:52,687 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:52,687 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:52,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741834_1010 (size=3066) 2024-11-19T04:54:52,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741834_1010 (size=3066) 2024-11-19T04:54:52,694 DEBUG [RS:0;08a7f35e60d4:36677 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/oldWALs 2024-11-19T04:54:52,694 INFO [RS:0;08a7f35e60d4:36677 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 08a7f35e60d4%2C36677%2C1731991997423.meta:.meta(num 1731991999479) 2024-11-19T04:54:52,695 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:52,695 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:52,695 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:52,695 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:52,695 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:52,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741847_1023 (size=12695) 2024-11-19T04:54:52,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741847_1023 (size=12695) 2024-11-19T04:54:52,702 DEBUG [RS:0;08a7f35e60d4:36677 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/oldWALs 2024-11-19T04:54:52,702 INFO [RS:0;08a7f35e60d4:36677 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 08a7f35e60d4%2C36677%2C1731991997423:(num 1731992072434) 2024-11-19T04:54:52,702 DEBUG [RS:0;08a7f35e60d4:36677 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:54:52,702 INFO [RS:0;08a7f35e60d4:36677 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T04:54:52,702 INFO [RS:0;08a7f35e60d4:36677 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T04:54:52,702 INFO [RS:0;08a7f35e60d4:36677 {}] hbase.ChoreService(370): Chore service for: regionserver/08a7f35e60d4:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-19T04:54:52,703 INFO [RS:0;08a7f35e60d4:36677 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T04:54:52,703 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
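Editor's note: the repeated "sync.N ... interrupted" lines come from the WAL's sync runner threads. Each one blocks on a queue of sync requests (the thread report at the end of this log shows them parked in LinkedBlockingQueue.take inside FSHLog$SyncRunner), and closing the WAL interrupts them out of that wait. The sketch below shows that worker pattern in isolation: a thread that loops on take() and treats interruption as its shutdown signal. The names are made up; only the queue-take/interrupt mechanics correspond to what the log shows.

    import java.util.concurrent.LinkedBlockingQueue;

    /** Illustrative only: a worker that exits cleanly when interrupted out of take(). */
    public class SyncRunnerSketch {

        public static void main(String[] args) throws Exception {
            LinkedBlockingQueue<Runnable> requests = new LinkedBlockingQueue<>();

            Thread syncRunner = new Thread(() -> {
                try {
                    while (!Thread.currentThread().isInterrupted()) {
                        // Blocks until a sync request arrives, like
                        // FSHLog$SyncRunner.takeSyncRequest in the thread report below.
                        Runnable request = requests.take();
                        request.run();
                    }
                } catch (InterruptedException e) {
                    // Matches the "wal.FSHLog$SyncRunner(477): interrupted" lines:
                    // interruption during shutdown simply ends the loop.
                    System.out.println("sync runner interrupted, exiting");
                }
            }, "sync.0");

            syncRunner.start();
            requests.put(() -> System.out.println("handled one sync request"));
            Thread.sleep(100);       // let the request drain (sketch-level synchronisation)
            syncRunner.interrupt();  // what closing the WAL effectively does to its runners
            syncRunner.join();
        }
    }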
2024-11-19T04:54:52,703 INFO [RS:0;08a7f35e60d4:36677 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36677 2024-11-19T04:54:52,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36677-0x1012e92708a0001, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/08a7f35e60d4,36677,1731991997423 2024-11-19T04:54:52,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37341-0x1012e92708a0000, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T04:54:52,708 INFO [RS:0;08a7f35e60d4:36677 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T04:54:52,709 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [08a7f35e60d4,36677,1731991997423] 2024-11-19T04:54:52,712 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/08a7f35e60d4,36677,1731991997423 already deleted, retry=false 2024-11-19T04:54:52,713 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 08a7f35e60d4,36677,1731991997423 expired; onlineServers=0 2024-11-19T04:54:52,713 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '08a7f35e60d4,37341,1731991996736' ***** 2024-11-19T04:54:52,713 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T04:54:52,713 INFO [M:0;08a7f35e60d4:37341 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T04:54:52,713 INFO [M:0;08a7f35e60d4:37341 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T04:54:52,713 DEBUG [M:0;08a7f35e60d4:37341 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T04:54:52,713 DEBUG [M:0;08a7f35e60d4:37341 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T04:54:52,713 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
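Editor's note: once the region server stops its RPC server, its ephemeral znode under /hbase/rs disappears, the master's ZKWatcher sees NodeDeleted plus NodeChildrenChanged on /hbase/rs, and RegionServerTracker treats that as the server's expiration. That liveness mechanism is ZooKeeper ephemeral nodes plus a children watch; a stripped-down sketch follows. The quorum address and the /hbase/rs path come from the log, while the member name and ACL choice are placeholders (and the parent znode is assumed to already exist, as it does on the test cluster).

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    /** Illustrative only: ephemeral-node liveness tracking like /hbase/rs/<server>. */
    public class EphemeralLivenessSketch {

        public static void main(String[] args) throws Exception {
            ZooKeeper zk = new ZooKeeper("127.0.0.1:59995", 30_000, event ->
                    // A tracker would react here to NodeChildrenChanged on /hbase/rs,
                    // as the master's ZKWatcher does in the log above.
                    System.out.println("zk event: " + event));

            // A region server registers itself with an EPHEMERAL znode; ZooKeeper
            // removes the node automatically when the owning session ends.
            String member = zk.create("/hbase/rs/example-server",
                    new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

            // Arm a children watch so membership changes are reported.
            zk.getChildren("/hbase/rs", true);
            System.out.println("registered " + member);

            // Closing the session deletes the ephemeral node, which is what the
            // "RegionServer ephemeral node deleted" entry above is reporting.
            zk.close();
        }
    }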
2024-11-19T04:54:52,713 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.large.0-1731991998770 {}] cleaner.HFileCleaner(306): Exit Thread[master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.large.0-1731991998770,5,FailOnTimeoutGroup] 2024-11-19T04:54:52,713 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.small.0-1731991998770 {}] cleaner.HFileCleaner(306): Exit Thread[master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.small.0-1731991998770,5,FailOnTimeoutGroup] 2024-11-19T04:54:52,713 INFO [M:0;08a7f35e60d4:37341 {}] hbase.ChoreService(370): Chore service for: master/08a7f35e60d4:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T04:54:52,714 INFO [M:0;08a7f35e60d4:37341 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T04:54:52,714 DEBUG [M:0;08a7f35e60d4:37341 {}] master.HMaster(1795): Stopping service threads 2024-11-19T04:54:52,714 INFO [M:0;08a7f35e60d4:37341 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T04:54:52,714 INFO [M:0;08a7f35e60d4:37341 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T04:54:52,714 INFO [M:0;08a7f35e60d4:37341 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T04:54:52,714 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-19T04:54:52,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37341-0x1012e92708a0000, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T04:54:52,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37341-0x1012e92708a0000, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:54:52,715 DEBUG [M:0;08a7f35e60d4:37341 {}] zookeeper.ZKUtil(347): master:37341-0x1012e92708a0000, quorum=127.0.0.1:59995, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T04:54:52,715 WARN [M:0;08a7f35e60d4:37341 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T04:54:52,716 INFO [M:0;08a7f35e60d4:37341 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/.lastflushedseqids 2024-11-19T04:54:52,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741854_1030 (size=130) 2024-11-19T04:54:52,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741854_1030 (size=130) 2024-11-19T04:54:52,730 INFO [M:0;08a7f35e60d4:37341 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T04:54:52,730 INFO [M:0;08a7f35e60d4:37341 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T04:54:52,731 DEBUG [M:0;08a7f35e60d4:37341 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T04:54:52,731 INFO [M:0;08a7f35e60d4:37341 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:54:52,731 DEBUG [M:0;08a7f35e60d4:37341 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:54:52,731 DEBUG [M:0;08a7f35e60d4:37341 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T04:54:52,731 DEBUG [M:0;08a7f35e60d4:37341 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:54:52,731 INFO [M:0;08a7f35e60d4:37341 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-19T04:54:52,749 DEBUG [M:0;08a7f35e60d4:37341 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/beaf3ef96c9d4178908e0cac50ec0080 is 82, key is hbase:meta,,1/info:regioninfo/1731991999550/Put/seqid=0 2024-11-19T04:54:52,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741855_1031 (size=5672) 2024-11-19T04:54:52,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741855_1031 (size=5672) 2024-11-19T04:54:52,756 INFO [M:0;08a7f35e60d4:37341 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/beaf3ef96c9d4178908e0cac50ec0080 2024-11-19T04:54:52,780 DEBUG [M:0;08a7f35e60d4:37341 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/013fd00b57d4482d9dd5c0c75e55ed2a is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731992000379/Put/seqid=0 2024-11-19T04:54:52,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741856_1032 (size=6247) 2024-11-19T04:54:52,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741856_1032 (size=6247) 2024-11-19T04:54:52,787 INFO [M:0;08a7f35e60d4:37341 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/013fd00b57d4482d9dd5c0c75e55ed2a 2024-11-19T04:54:52,794 INFO [M:0;08a7f35e60d4:37341 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 013fd00b57d4482d9dd5c0c75e55ed2a 2024-11-19T04:54:52,810 DEBUG [M:0;08a7f35e60d4:37341 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c6480b8137334957a0d4381aad06022d is 69, key is 08a7f35e60d4,36677,1731991997423/rs:state/1731991998853/Put/seqid=0 2024-11-19T04:54:52,811 
DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36677-0x1012e92708a0001, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T04:54:52,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36677-0x1012e92708a0001, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T04:54:52,812 INFO [RS:0;08a7f35e60d4:36677 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T04:54:52,812 INFO [RS:0;08a7f35e60d4:36677 {}] regionserver.HRegionServer(1031): Exiting; stopping=08a7f35e60d4,36677,1731991997423; zookeeper connection closed. 2024-11-19T04:54:52,812 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7a3eb3e5 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7a3eb3e5 2024-11-19T04:54:52,813 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-19T04:54:52,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741857_1033 (size=5156) 2024-11-19T04:54:52,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741857_1033 (size=5156) 2024-11-19T04:54:52,819 INFO [M:0;08a7f35e60d4:37341 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c6480b8137334957a0d4381aad06022d 2024-11-19T04:54:52,846 DEBUG [M:0;08a7f35e60d4:37341 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f944f07015b54c3894fca24a1619358a is 52, key is load_balancer_on/state:d/1731991999890/Put/seqid=0 2024-11-19T04:54:52,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741858_1034 (size=5056) 2024-11-19T04:54:52,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741858_1034 (size=5056) 2024-11-19T04:54:52,855 INFO [M:0;08a7f35e60d4:37341 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f944f07015b54c3894fca24a1619358a 2024-11-19T04:54:52,863 DEBUG [M:0;08a7f35e60d4:37341 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/beaf3ef96c9d4178908e0cac50ec0080 as hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/beaf3ef96c9d4178908e0cac50ec0080 2024-11-19T04:54:52,871 INFO [M:0;08a7f35e60d4:37341 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/beaf3ef96c9d4178908e0cac50ec0080, entries=8, sequenceid=59, filesize=5.5 K 2024-11-19T04:54:52,874 DEBUG [M:0;08a7f35e60d4:37341 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/013fd00b57d4482d9dd5c0c75e55ed2a as hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/013fd00b57d4482d9dd5c0c75e55ed2a 2024-11-19T04:54:52,882 INFO [M:0;08a7f35e60d4:37341 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 013fd00b57d4482d9dd5c0c75e55ed2a 2024-11-19T04:54:52,882 INFO [M:0;08a7f35e60d4:37341 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/013fd00b57d4482d9dd5c0c75e55ed2a, entries=6, sequenceid=59, filesize=6.1 K 2024-11-19T04:54:52,884 DEBUG [M:0;08a7f35e60d4:37341 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c6480b8137334957a0d4381aad06022d as hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c6480b8137334957a0d4381aad06022d 2024-11-19T04:54:52,892 INFO [M:0;08a7f35e60d4:37341 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c6480b8137334957a0d4381aad06022d, entries=1, sequenceid=59, filesize=5.0 K 2024-11-19T04:54:52,893 DEBUG [M:0;08a7f35e60d4:37341 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f944f07015b54c3894fca24a1619358a as hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f944f07015b54c3894fca24a1619358a 2024-11-19T04:54:52,902 INFO [M:0;08a7f35e60d4:37341 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f944f07015b54c3894fca24a1619358a, entries=1, sequenceid=59, filesize=4.9 K 2024-11-19T04:54:52,904 INFO [M:0;08a7f35e60d4:37341 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 173ms, sequenceid=59, compaction requested=false 2024-11-19T04:54:52,909 INFO [M:0;08a7f35e60d4:37341 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
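Editor's note: before closing its local master:store region, the master writes a small .lastflushedseqids file to HDFS and then flushes the store's four column families (info, proc, rs, state) using the same tmp-write-and-commit pattern seen earlier. The sketch below shows only the "persist a tiny sequence-id marker file" filesystem step under assumed, simplified contents; the real file uses HBase's own serialization and carries every region's last flushed sequence id, not the single hand-written pair used here.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    /** Illustrative only: write and read back a small "last flushed seqid" marker file. */
    public class LastFlushedSeqIdSketch {

        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();   // would point at hdfs://... in the test
            FileSystem fs = FileSystem.get(conf);
            Path marker = new Path("target/seqid-sketch/.lastflushedseqids");

            // Write: one region's encoded name and its last flushed sequence id.
            try (FSDataOutputStream out = fs.create(marker, true)) {
                out.writeUTF("1588230740");   // hbase:meta's encoded name, from the log
                out.writeLong(11L);           // sequenceid=11, as flushed above
            }

            // Read it back to show the round trip.
            try (FSDataInputStream in = fs.open(marker)) {
                System.out.println(in.readUTF() + " last flushed at seqid " + in.readLong());
            }
        }
    }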
2024-11-19T04:54:52,909 DEBUG [M:0;08a7f35e60d4:37341 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731992092731Disabling compacts and flushes for region at 1731992092731Disabling writes for close at 1731992092731Obtaining lock to block concurrent updates at 1731992092731Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731992092731Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1731992092731Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731992092732 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731992092732Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731992092749 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731992092749Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731992092762 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731992092779 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731992092779Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731992092794 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731992092810 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731992092810Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731992092828 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731992092845 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731992092845Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@8eef545: reopening flushed file at 1731992092862 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@12aa2dbc: reopening flushed file at 1731992092871 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2fad0934: reopening flushed file at 1731992092882 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@75ed7bf6: reopening flushed file at 1731992092892 (+10 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 173ms, sequenceid=59, compaction requested=false at 1731992092904 (+12 ms)Writing region close event to WAL at 1731992092908 (+4 ms)Closed at 1731992092909 (+1 ms) 2024-11-19T04:54:52,910 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:52,910 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:52,910 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:52,910 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:52,910 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:52,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741830_1006 (size=27973) 2024-11-19T04:54:52,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40499 is added to blk_1073741830_1006 (size=27973) 2024-11-19T04:54:52,914 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-19T04:54:52,914 INFO [M:0;08a7f35e60d4:37341 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-19T04:54:52,914 INFO [M:0;08a7f35e60d4:37341 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37341 2024-11-19T04:54:52,915 INFO [M:0;08a7f35e60d4:37341 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T04:54:52,930 INFO [regionserver/08a7f35e60d4:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T04:54:53,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37341-0x1012e92708a0000, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T04:54:53,017 INFO [M:0;08a7f35e60d4:37341 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T04:54:53,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37341-0x1012e92708a0000, quorum=127.0.0.1:59995, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T04:54:53,022 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:54:53,024 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T04:54:53,025 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T04:54:53,025 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T04:54:53,025 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/hadoop.log.dir/,STOPPED} 2024-11-19T04:54:53,029 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
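Editor's note: after the master's RPC server and ZooKeeper connection close, the remaining entries tear down the HDFS datanodes, Jetty web contexts and the MiniZK cluster, and then a ResourceChecker report (a few entries below) compares the thread count after the test with the count before it ("Thread=81 (was 12)") and dumps every "potentially hanging" thread with its stack. A minimal sketch of that before/after thread accounting, using only java.lang.Thread, follows; it captures the general idea, not HBase's ResourceChecker implementation.

    import java.util.HashSet;
    import java.util.Set;

    /** Illustrative only: before/after thread accounting like the ResourceChecker report below. */
    public class ThreadDiffSketch {

        private static Set<String> threadNames() {
            Set<String> names = new HashSet<>();
            for (Thread t : Thread.getAllStackTraces().keySet()) {
                names.add(t.getName());
            }
            return names;
        }

        public static void main(String[] args) throws Exception {
            Set<String> before = threadNames();

            // Stand-in for "run the test": start something that outlives this block.
            Thread lingering = new Thread(() -> {
                try {
                    Thread.sleep(60_000);
                } catch (InterruptedException ignored) {
                    // interrupted during cleanup
                }
            }, "potentially-hanging-thread");
            lingering.setDaemon(true);
            lingering.start();

            Set<String> leaked = threadNames();
            leaked.removeAll(before);

            System.out.println("Thread=" + Thread.getAllStackTraces().size()
                    + " (was " + before.size() + ")");
            for (String name : leaked) {
                System.out.println("Potentially hanging thread: " + name);
            }
            lingering.interrupt();
        }
    }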
2024-11-19T04:54:53,029 WARN [BP-969444313-172.17.0.2-1731991993942 heartbeating to localhost/127.0.0.1:35757 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T04:54:53,030 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T04:54:53,030 WARN [BP-969444313-172.17.0.2-1731991993942 heartbeating to localhost/127.0.0.1:35757 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-969444313-172.17.0.2-1731991993942 (Datanode Uuid 8010304b-bd85-463f-8ab5-6eababe95d1a) service to localhost/127.0.0.1:35757 2024-11-19T04:54:53,031 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/cluster_d123a51d-24cf-f431-42e8-c73d729b17eb/data/data3/current/BP-969444313-172.17.0.2-1731991993942 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:54:53,032 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/cluster_d123a51d-24cf-f431-42e8-c73d729b17eb/data/data4/current/BP-969444313-172.17.0.2-1731991993942 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:54:53,032 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T04:54:53,042 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:54:53,042 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T04:54:53,042 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T04:54:53,043 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T04:54:53,043 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/hadoop.log.dir/,STOPPED} 2024-11-19T04:54:53,044 WARN [BP-969444313-172.17.0.2-1731991993942 heartbeating to localhost/127.0.0.1:35757 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T04:54:53,045 WARN [BP-969444313-172.17.0.2-1731991993942 heartbeating to localhost/127.0.0.1:35757 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-969444313-172.17.0.2-1731991993942 (Datanode Uuid df9449b8-380d-4ec3-87f3-b6292c8e0f07) service to localhost/127.0.0.1:35757 2024-11-19T04:54:53,045 WARN 
[refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/cluster_d123a51d-24cf-f431-42e8-c73d729b17eb/data/data1/current/BP-969444313-172.17.0.2-1731991993942 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:54:53,046 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/cluster_d123a51d-24cf-f431-42e8-c73d729b17eb/data/data2/current/BP-969444313-172.17.0.2-1731991993942 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:54:53,046 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-19T04:54:53,046 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T04:54:53,046 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T04:54:53,060 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T04:54:53,061 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T04:54:53,061 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T04:54:53,061 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T04:54:53,061 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/hadoop.log.dir/,STOPPED} 2024-11-19T04:54:53,071 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T04:54:53,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T04:54:53,118 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=81 (was 12) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35757 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging 
thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35757 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35757 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: master/08a7f35e60d4:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35757 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: master/08a7f35e60d4:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ForkJoinPool-2-worker-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/08a7f35e60d4:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@5d436e14 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35757 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) 
app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35757 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35757 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:35757 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=404 (was 287) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=177 (was 224), ProcessCount=11 (was 11), AvailableMemoryMB=12185 (was 12591) 2024-11-19T04:54:53,126 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=82, OpenFileDescriptor=404, MaxFileDescriptor=1048576, SystemLoadAverage=177, ProcessCount=11, AvailableMemoryMB=12185 2024-11-19T04:54:53,126 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T04:54:53,126 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/hadoop.log.dir so I do NOT create it in target/test-data/b1093eb7-96b9-17ee-3778-97521f906159 2024-11-19T04:54:53,126 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a4deaaf2-6427-68ce-89e0-7b4003a4c5f7/hadoop.tmp.dir so I do NOT create it in target/test-data/b1093eb7-96b9-17ee-3778-97521f906159 2024-11-19T04:54:53,126 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/cluster_ba6f92a7-d1ac-1f96-b390-ba7e329039c7, deleteOnExit=true 2024-11-19T04:54:53,126 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T04:54:53,127 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/test.cache.data in system properties and HBase conf 2024-11-19T04:54:53,127 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T04:54:53,127 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/hadoop.log.dir in system properties and HBase conf 2024-11-19T04:54:53,127 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T04:54:53,127 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T04:54:53,127 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T04:54:53,127 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-19T04:54:53,127 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T04:54:53,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T04:54:53,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T04:54:53,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T04:54:53,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T04:54:53,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T04:54:53,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T04:54:53,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T04:54:53,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T04:54:53,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/nfs.dump.dir in system properties and HBase conf 2024-11-19T04:54:53,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/java.io.tmpdir in system properties and HBase conf 2024-11-19T04:54:53,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T04:54:53,129 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T04:54:53,129 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T04:54:53,146 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T04:54:53,226 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:54:53,232 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T04:54:53,233 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T04:54:53,233 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T04:54:53,233 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T04:54:53,234 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:54:53,235 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f681677{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/hadoop.log.dir/,AVAILABLE} 2024-11-19T04:54:53,235 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3197ca45{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T04:54:53,355 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@49a88a00{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/java.io.tmpdir/jetty-localhost-36609-hadoop-hdfs-3_4_1-tests_jar-_-any-14494660174349502319/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T04:54:53,356 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4e4256d4{HTTP/1.1, (http/1.1)}{localhost:36609} 2024-11-19T04:54:53,356 INFO [Time-limited test {}] server.Server(415): Started @101401ms 2024-11-19T04:54:53,371 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T04:54:53,462 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:54:53,467 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T04:54:53,469 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T04:54:53,469 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T04:54:53,469 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T04:54:53,470 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@eab7acc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/hadoop.log.dir/,AVAILABLE} 2024-11-19T04:54:53,470 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4edee9ab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T04:54:53,593 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@542ee468{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/java.io.tmpdir/jetty-localhost-42649-hadoop-hdfs-3_4_1-tests_jar-_-any-7241291417757694318/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:54:53,593 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@75c88313{HTTP/1.1, (http/1.1)}{localhost:42649} 2024-11-19T04:54:53,593 INFO [Time-limited test {}] server.Server(415): Started @101638ms 2024-11-19T04:54:53,595 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T04:54:53,638 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:54:53,643 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T04:54:53,644 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T04:54:53,645 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T04:54:53,645 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T04:54:53,645 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c1be80f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/hadoop.log.dir/,AVAILABLE} 2024-11-19T04:54:53,646 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b44e274{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T04:54:53,695 WARN [Thread-437 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/cluster_ba6f92a7-d1ac-1f96-b390-ba7e329039c7/data/data1/current/BP-1756748749-172.17.0.2-1731992093165/current, will proceed with Du for space computation calculation, 2024-11-19T04:54:53,695 WARN [Thread-438 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/cluster_ba6f92a7-d1ac-1f96-b390-ba7e329039c7/data/data2/current/BP-1756748749-172.17.0.2-1731992093165/current, will proceed with Du for space computation calculation, 2024-11-19T04:54:53,730 WARN [Thread-416 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T04:54:53,736 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x335b23333945c9df with lease ID 0xb6d01488929b46dc: Processing first storage report for DS-10e570db-95c0-4208-8833-7bacefd989dd from datanode DatanodeRegistration(127.0.0.1:35465, datanodeUuid=a3d3c148-353a-48a3-8df1-ecf37c5e5443, infoPort=44383, infoSecurePort=0, ipcPort=43499, storageInfo=lv=-57;cid=testClusterID;nsid=1950494520;c=1731992093165) 2024-11-19T04:54:53,736 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x335b23333945c9df with lease ID 0xb6d01488929b46dc: from storage DS-10e570db-95c0-4208-8833-7bacefd989dd node DatanodeRegistration(127.0.0.1:35465, datanodeUuid=a3d3c148-353a-48a3-8df1-ecf37c5e5443, infoPort=44383, infoSecurePort=0, ipcPort=43499, storageInfo=lv=-57;cid=testClusterID;nsid=1950494520;c=1731992093165), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:54:53,736 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x335b23333945c9df with lease ID 0xb6d01488929b46dc: Processing first storage report for DS-c7ca0fa3-45e4-46d9-8fd0-370e8ce1b99a from datanode DatanodeRegistration(127.0.0.1:35465, datanodeUuid=a3d3c148-353a-48a3-8df1-ecf37c5e5443, infoPort=44383, infoSecurePort=0, ipcPort=43499, storageInfo=lv=-57;cid=testClusterID;nsid=1950494520;c=1731992093165) 2024-11-19T04:54:53,737 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x335b23333945c9df with lease ID 0xb6d01488929b46dc: from storage DS-c7ca0fa3-45e4-46d9-8fd0-370e8ce1b99a node DatanodeRegistration(127.0.0.1:35465, datanodeUuid=a3d3c148-353a-48a3-8df1-ecf37c5e5443, infoPort=44383, infoSecurePort=0, ipcPort=43499, storageInfo=lv=-57;cid=testClusterID;nsid=1950494520;c=1731992093165), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:54:53,784 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2d183c93{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/java.io.tmpdir/jetty-localhost-44145-hadoop-hdfs-3_4_1-tests_jar-_-any-12741996763814342394/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:54:53,785 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7154ca22{HTTP/1.1, (http/1.1)}{localhost:44145} 2024-11-19T04:54:53,785 INFO [Time-limited test {}] server.Server(415): Started @101829ms 2024-11-19T04:54:53,786 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
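The entries above show the test's embedded HDFS coming up: datanode block-pool slices are scanned, the first block reports are processed by the NameNode, and each datanode's Jetty web UI is started. As a minimal sketch of how a test typically stands this environment up, assuming the HBaseTestingUtil class logged above exposes the same startMiniCluster()/shutdownMiniCluster() methods as the long-standing HBaseTestingUtility it replaces (the class and method names here are not taken from this log):

// Illustrative sketch only: standing up the mini DFS/ZooKeeper/HBase environment
// whose startup is being logged above. Assumes HBaseTestingUtil mirrors the
// HBaseTestingUtility API (startMiniCluster/shutdownMiniCluster/getConfiguration).
import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniClusterExample {
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  public static void main(String[] args) throws Exception {
    // Starts an in-process NameNode, DataNodes, ZooKeeper and HBase master/regionserver,
    // writing all state under a per-run test-data directory like the one in the log.
    TEST_UTIL.startMiniCluster();
    try {
      // hbase.rootdir is pointed at the mini DFS, as the later log entries show.
      System.out.println("hbase.rootdir = " + TEST_UTIL.getConfiguration().get("hbase.rootdir"));
    } finally {
      TEST_UTIL.shutdownMiniCluster();
    }
  }
}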
2024-11-19T04:54:53,886 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/cluster_ba6f92a7-d1ac-1f96-b390-ba7e329039c7/data/data4/current/BP-1756748749-172.17.0.2-1731992093165/current, will proceed with Du for space computation calculation, 2024-11-19T04:54:53,886 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/cluster_ba6f92a7-d1ac-1f96-b390-ba7e329039c7/data/data3/current/BP-1756748749-172.17.0.2-1731992093165/current, will proceed with Du for space computation calculation, 2024-11-19T04:54:53,904 WARN [Thread-452 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T04:54:53,907 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdb48591e8a15421a with lease ID 0xb6d01488929b46dd: Processing first storage report for DS-8737b7c8-aa55-48fd-ad45-2b612b4a2f27 from datanode DatanodeRegistration(127.0.0.1:37507, datanodeUuid=fb326dc8-058d-4e47-8b6a-9d08fe7ff095, infoPort=33829, infoSecurePort=0, ipcPort=33541, storageInfo=lv=-57;cid=testClusterID;nsid=1950494520;c=1731992093165) 2024-11-19T04:54:53,907 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdb48591e8a15421a with lease ID 0xb6d01488929b46dd: from storage DS-8737b7c8-aa55-48fd-ad45-2b612b4a2f27 node DatanodeRegistration(127.0.0.1:37507, datanodeUuid=fb326dc8-058d-4e47-8b6a-9d08fe7ff095, infoPort=33829, infoSecurePort=0, ipcPort=33541, storageInfo=lv=-57;cid=testClusterID;nsid=1950494520;c=1731992093165), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:54:53,908 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdb48591e8a15421a with lease ID 0xb6d01488929b46dd: Processing first storage report for DS-fbebe4b8-07dc-4025-ac94-193fde7059ba from datanode DatanodeRegistration(127.0.0.1:37507, datanodeUuid=fb326dc8-058d-4e47-8b6a-9d08fe7ff095, infoPort=33829, infoSecurePort=0, ipcPort=33541, storageInfo=lv=-57;cid=testClusterID;nsid=1950494520;c=1731992093165) 2024-11-19T04:54:53,908 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdb48591e8a15421a with lease ID 0xb6d01488929b46dd: from storage DS-fbebe4b8-07dc-4025-ac94-193fde7059ba node DatanodeRegistration(127.0.0.1:37507, datanodeUuid=fb326dc8-058d-4e47-8b6a-9d08fe7ff095, infoPort=33829, infoSecurePort=0, ipcPort=33541, storageInfo=lv=-57;cid=testClusterID;nsid=1950494520;c=1731992093165), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:54:53,915 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159 2024-11-19T04:54:53,919 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/cluster_ba6f92a7-d1ac-1f96-b390-ba7e329039c7/zookeeper_0, clientPort=58257, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/cluster_ba6f92a7-d1ac-1f96-b390-ba7e329039c7/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/cluster_ba6f92a7-d1ac-1f96-b390-ba7e329039c7/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T04:54:53,921 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58257 2024-11-19T04:54:53,922 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:54:53,924 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:54:53,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37507 is added to blk_1073741825_1001 (size=7) 2024-11-19T04:54:53,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35465 is added to blk_1073741825_1001 (size=7) 2024-11-19T04:54:53,941 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c with version=8 2024-11-19T04:54:53,941 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/hbase-staging 2024-11-19T04:54:53,945 INFO [Time-limited test {}] client.ConnectionUtils(128): master/08a7f35e60d4:0 server-side Connection retries=45 2024-11-19T04:54:53,945 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T04:54:53,945 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T04:54:53,945 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T04:54:53,945 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T04:54:53,946 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T04:54:53,946 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T04:54:53,946 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T04:54:53,947 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36569 2024-11-19T04:54:53,949 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36569 connecting to ZooKeeper ensemble=127.0.0.1:58257 2024-11-19T04:54:53,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:365690x0, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T04:54:53,965 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36569-0x1012e93ef6d0000 connected 2024-11-19T04:54:53,995 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:54:53,997 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:54:54,000 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36569-0x1012e93ef6d0000, quorum=127.0.0.1:58257, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T04:54:54,000 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c, hbase.cluster.distributed=false 2024-11-19T04:54:54,002 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36569-0x1012e93ef6d0000, quorum=127.0.0.1:58257, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T04:54:54,005 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36569 2024-11-19T04:54:54,007 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36569 2024-11-19T04:54:54,008 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36569 2024-11-19T04:54:54,008 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36569 2024-11-19T04:54:54,009 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36569 2024-11-19T04:54:54,027 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/08a7f35e60d4:0 server-side Connection retries=45 2024-11-19T04:54:54,027 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T04:54:54,027 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T04:54:54,027 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T04:54:54,027 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T04:54:54,028 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T04:54:54,028 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T04:54:54,028 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T04:54:54,029 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40385 2024-11-19T04:54:54,030 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40385 connecting to ZooKeeper ensemble=127.0.0.1:58257 2024-11-19T04:54:54,031 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:54:54,034 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:54:54,041 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:403850x0, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T04:54:54,041 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:403850x0, quorum=127.0.0.1:58257, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T04:54:54,042 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40385-0x1012e93ef6d0001 connected 2024-11-19T04:54:54,042 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T04:54:54,042 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T04:54:54,043 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40385-0x1012e93ef6d0001, quorum=127.0.0.1:58257, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T04:54:54,044 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40385-0x1012e93ef6d0001, quorum=127.0.0.1:58257, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T04:54:54,047 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40385 2024-11-19T04:54:54,047 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40385 2024-11-19T04:54:54,047 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40385 2024-11-19T04:54:54,048 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40385 2024-11-19T04:54:54,049 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40385 2024-11-19T04:54:54,063 
DEBUG [M:0;08a7f35e60d4:36569 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;08a7f35e60d4:36569 2024-11-19T04:54:54,063 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/08a7f35e60d4,36569,1731992093944 2024-11-19T04:54:54,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40385-0x1012e93ef6d0001, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T04:54:54,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36569-0x1012e93ef6d0000, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T04:54:54,066 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36569-0x1012e93ef6d0000, quorum=127.0.0.1:58257, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/08a7f35e60d4,36569,1731992093944 2024-11-19T04:54:54,067 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36569-0x1012e93ef6d0000, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:54:54,067 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40385-0x1012e93ef6d0001, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T04:54:54,067 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40385-0x1012e93ef6d0001, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:54:54,068 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36569-0x1012e93ef6d0000, quorum=127.0.0.1:58257, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T04:54:54,068 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/08a7f35e60d4,36569,1731992093944 from backup master directory 2024-11-19T04:54:54,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36569-0x1012e93ef6d0000, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/08a7f35e60d4,36569,1731992093944 2024-11-19T04:54:54,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40385-0x1012e93ef6d0001, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T04:54:54,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36569-0x1012e93ef6d0000, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T04:54:54,070 WARN [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
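The earlier ZKUtil/ZKWatcher entries repeatedly note "Set watcher on znode that does not yet exist" for paths such as /hbase/running and /hbase/master, and the entries just above show the resulting NodeCreated/NodeDeleted events as the master registers itself. A simplified stand-in for that watch pattern, using the plain Apache ZooKeeper client rather than HBase's internal ZKUtil/ZKWatcher, is sketched below; the connect string 127.0.0.1:2181 and the class name are placeholders, not values from this log.

// Simplified stand-in: setting a watch on a znode that may not exist yet.
// exists() registers the watch either way, so the client is notified with a
// NodeCreated event when the node appears, as seen above for /hbase/running.
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class WatchExample {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30000, event -> { });
    Watcher watcher = (WatchedEvent event) ->
        System.out.println("event " + event.getType() + " on " + event.getPath());
    // Returns null if /hbase/running does not exist yet, but the watch is still set.
    System.out.println("stat: " + zk.exists("/hbase/running", watcher));
    zk.close();
  }
}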
2024-11-19T04:54:54,070 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=08a7f35e60d4,36569,1731992093944 2024-11-19T04:54:54,078 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/hbase.id] with ID: e90c6ebb-046b-46fd-b7fd-45e542f7a9b8 2024-11-19T04:54:54,078 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/.tmp/hbase.id 2024-11-19T04:54:54,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35465 is added to blk_1073741826_1002 (size=42) 2024-11-19T04:54:54,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37507 is added to blk_1073741826_1002 (size=42) 2024-11-19T04:54:54,085 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/.tmp/hbase.id]:[hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/hbase.id] 2024-11-19T04:54:54,100 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:54:54,101 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-19T04:54:54,102 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
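The FSUtils entries above write the cluster ID to a temporary .tmp/hbase.id file and only then move it to its final location, so readers never observe a partially written file. Below is a hedged sketch of that write-then-rename pattern with the standard Hadoop FileSystem API; the NameNode address, paths, and cluster ID echo the log but are used here purely as placeholders, and the class name is invented for illustration.

// Sketch of the write-then-rename publish pattern visible above.
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PublishFileExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:40649"); // placeholder NameNode address
    FileSystem fs = FileSystem.get(conf);

    Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id");
    Path target = new Path("/user/jenkins/test-data/hbase.id");

    // Write the content somewhere readers do not look...
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("e90c6ebb-046b-46fd-b7fd-45e542f7a9b8".getBytes(StandardCharsets.UTF_8));
    }
    // ...then publish it with a single rename, so readers only ever see a complete file.
    if (!fs.rename(tmp, target)) {
      throw new IllegalStateException("rename failed: " + tmp + " -> " + target);
    }
  }
}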
2024-11-19T04:54:54,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40385-0x1012e93ef6d0001, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:54:54,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36569-0x1012e93ef6d0000, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:54:54,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35465 is added to blk_1073741827_1003 (size=196) 2024-11-19T04:54:54,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37507 is added to blk_1073741827_1003 (size=196) 2024-11-19T04:54:54,115 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T04:54:54,116 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T04:54:54,116 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T04:54:54,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35465 is added to blk_1073741828_1004 (size=1189) 2024-11-19T04:54:54,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37507 is added to blk_1073741828_1004 (size=1189) 2024-11-19T04:54:54,128 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/MasterData/data/master/store 2024-11-19T04:54:54,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35465 is added to blk_1073741829_1005 (size=34) 2024-11-19T04:54:54,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37507 is added to blk_1073741829_1005 (size=34) 2024-11-19T04:54:54,544 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:54:54,545 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T04:54:54,545 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:54:54,545 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:54:54,545 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T04:54:54,545 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:54:54,545 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
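The descriptor printed above for the internal 'master:store' region lists four column families (info, proc, rs, state), each with its own versions, block encoding, bloom filter, in-memory flag, and block size. The sketch below is not HBase's MasterRegion code; it only shows how an equivalent 'info' family could be expressed with the public TableDescriptorBuilder/ColumnFamilyDescriptorBuilder API, mirroring the values the log prints.

// Sketch: building a descriptor equivalent to the 'info' family above
// (3 versions, ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks).
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorExample {
  public static void main(String[] args) {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8192)
        .build();

    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info)
        .build();

    System.out.println(td);
  }
}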
2024-11-19T04:54:54,545 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731992094544Disabling compacts and flushes for region at 1731992094544Disabling writes for close at 1731992094545 (+1 ms)Writing region close event to WAL at 1731992094545Closed at 1731992094545 2024-11-19T04:54:54,547 WARN [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/MasterData/data/master/store/.initializing 2024-11-19T04:54:54,547 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/MasterData/WALs/08a7f35e60d4,36569,1731992093944 2024-11-19T04:54:54,552 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=08a7f35e60d4%2C36569%2C1731992093944, suffix=, logDir=hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/MasterData/WALs/08a7f35e60d4,36569,1731992093944, archiveDir=hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/MasterData/oldWALs, maxLogs=10 2024-11-19T04:54:54,552 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C36569%2C1731992093944.1731992094552 2024-11-19T04:54:54,566 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/MasterData/WALs/08a7f35e60d4,36569,1731992093944/08a7f35e60d4%2C36569%2C1731992093944.1731992094552 2024-11-19T04:54:54,572 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33829:33829),(127.0.0.1/127.0.0.1:44383:44383)] 2024-11-19T04:54:54,584 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T04:54:54,585 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:54:54,585 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:54:54,585 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:54:54,587 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:54:54,589 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T04:54:54,589 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:54:54,590 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:54:54,590 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:54:54,592 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T04:54:54,592 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:54:54,593 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T04:54:54,593 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:54:54,596 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T04:54:54,596 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:54:54,597 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T04:54:54,597 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:54:54,599 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T04:54:54,599 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:54:54,600 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T04:54:54,602 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:54:54,603 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:54:54,603 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:54:54,605 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:54:54,605 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:54:54,606 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T04:54:54,608 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:54:54,616 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T04:54:54,616 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=808747, jitterRate=0.028376206755638123}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T04:54:54,618 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731992094585Initializing all the Stores at 1731992094587 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992094587Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992094587Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992094587Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992094587Cleaning up temporary data from old regions at 1731992094605 (+18 ms)Region opened successfully at 1731992094618 (+13 ms) 2024-11-19T04:54:54,620 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T04:54:54,626 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11d7a217, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=08a7f35e60d4/172.17.0.2:0 2024-11-19T04:54:54,628 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T04:54:54,628 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T04:54:54,628 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T04:54:54,628 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T04:54:54,629 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-19T04:54:54,630 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-19T04:54:54,630 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T04:54:54,636 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T04:54:54,637 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36569-0x1012e93ef6d0000, quorum=127.0.0.1:58257, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T04:54:54,639 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T04:54:54,639 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T04:54:54,640 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36569-0x1012e93ef6d0000, quorum=127.0.0.1:58257, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T04:54:54,642 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T04:54:54,642 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T04:54:54,645 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36569-0x1012e93ef6d0000, quorum=127.0.0.1:58257, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T04:54:54,647 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T04:54:54,648 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36569-0x1012e93ef6d0000, quorum=127.0.0.1:58257, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T04:54:54,650 DEBUG 
[master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T04:54:54,656 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36569-0x1012e93ef6d0000, quorum=127.0.0.1:58257, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T04:54:54,657 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T04:54:54,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40385-0x1012e93ef6d0001, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T04:54:54,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36569-0x1012e93ef6d0000, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T04:54:54,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40385-0x1012e93ef6d0001, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:54:54,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36569-0x1012e93ef6d0000, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:54:54,661 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=08a7f35e60d4,36569,1731992093944, sessionid=0x1012e93ef6d0000, setting cluster-up flag (Was=false) 2024-11-19T04:54:54,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40385-0x1012e93ef6d0001, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:54:54,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36569-0x1012e93ef6d0000, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:54:54,671 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T04:54:54,672 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=08a7f35e60d4,36569,1731992093944 2024-11-19T04:54:54,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40385-0x1012e93ef6d0001, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:54:54,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36569-0x1012e93ef6d0000, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:54:54,688 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T04:54:54,690 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=08a7f35e60d4,36569,1731992093944 2024-11-19T04:54:54,692 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T04:54:54,694 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T04:54:54,695 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T04:54:54,695 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-19T04:54:54,695 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 08a7f35e60d4,36569,1731992093944 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T04:54:54,697 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/08a7f35e60d4:0, corePoolSize=5, maxPoolSize=5 2024-11-19T04:54:54,697 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/08a7f35e60d4:0, corePoolSize=5, maxPoolSize=5 2024-11-19T04:54:54,697 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=5, maxPoolSize=5 2024-11-19T04:54:54,697 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=5, maxPoolSize=5 2024-11-19T04:54:54,697 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/08a7f35e60d4:0, corePoolSize=10, maxPoolSize=10 2024-11-19T04:54:54,697 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:54,697 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=2, maxPoolSize=2 2024-11-19T04:54:54,697 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=1, 
maxPoolSize=1 2024-11-19T04:54:54,698 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731992124698 2024-11-19T04:54:54,698 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T04:54:54,699 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T04:54:54,699 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T04:54:54,699 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T04:54:54,699 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T04:54:54,699 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T04:54:54,699 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:54,699 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T04:54:54,699 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T04:54:54,700 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T04:54:54,700 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T04:54:54,700 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T04:54:54,700 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T04:54:54,700 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T04:54:54,700 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.large.0-1731992094700,5,FailOnTimeoutGroup] 2024-11-19T04:54:54,701 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.small.0-1731992094700,5,FailOnTimeoutGroup] 2024-11-19T04:54:54,701 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:54,701 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T04:54:54,701 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:54:54,701 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:54,701 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:54,701 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T04:54:54,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37507 is added to blk_1073741831_1007 (size=1321) 2024-11-19T04:54:54,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35465 is added to blk_1073741831_1007 (size=1321) 2024-11-19T04:54:54,721 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T04:54:54,721 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c 2024-11-19T04:54:54,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37507 is added to blk_1073741832_1008 (size=32) 2024-11-19T04:54:54,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35465 is added to blk_1073741832_1008 (size=32) 2024-11-19T04:54:54,745 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:54:54,747 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T04:54:54,749 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T04:54:54,749 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:54:54,750 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:54:54,751 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T04:54:54,753 INFO [RS:0;08a7f35e60d4:40385 {}] regionserver.HRegionServer(746): ClusterId : e90c6ebb-046b-46fd-b7fd-45e542f7a9b8 2024-11-19T04:54:54,754 DEBUG [RS:0;08a7f35e60d4:40385 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T04:54:54,756 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T04:54:54,756 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:54:54,759 DEBUG [RS:0;08a7f35e60d4:40385 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T04:54:54,759 DEBUG [RS:0;08a7f35e60d4:40385 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T04:54:54,763 DEBUG [RS:0;08a7f35e60d4:40385 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T04:54:54,763 DEBUG [RS:0;08a7f35e60d4:40385 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@52d5f963, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=08a7f35e60d4/172.17.0.2:0 2024-11-19T04:54:54,768 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:54:54,769 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T04:54:54,771 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T04:54:54,771 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:54:54,772 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:54:54,772 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T04:54:54,774 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T04:54:54,774 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:54:54,775 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:54:54,775 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T04:54:54,776 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/data/hbase/meta/1588230740 2024-11-19T04:54:54,777 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/data/hbase/meta/1588230740 2024-11-19T04:54:54,779 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T04:54:54,779 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T04:54:54,780 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
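A few records back, HMaster noted that reopening regions with a very high storeFileRefCount stays disabled until hbase.regions.recovery.store.file.ref.count is given a value greater than 0, and FlushLargeStoresPolicy just fell back to memstore-flush-size divided by the number of families because no per-family lower bound was set. A minimal, illustrative sketch of setting the first of those keys on a client/master Configuration; the threshold value 3 is an arbitrary example, not taken from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class RecoveryThresholdExample {
      public static Configuration configure() {
        Configuration conf = HBaseConfiguration.create();
        // Per the log message, the chore that reopens regions with very high
        // storeFileRefCount remains disabled unless this threshold is > 0.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3); // example value only
        return conf;
      }
    }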
2024-11-19T04:54:54,782 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T04:54:54,784 DEBUG [RS:0;08a7f35e60d4:40385 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;08a7f35e60d4:40385 2024-11-19T04:54:54,784 INFO [RS:0;08a7f35e60d4:40385 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T04:54:54,784 INFO [RS:0;08a7f35e60d4:40385 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T04:54:54,784 DEBUG [RS:0;08a7f35e60d4:40385 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-19T04:54:54,786 INFO [RS:0;08a7f35e60d4:40385 {}] regionserver.HRegionServer(2659): reportForDuty to master=08a7f35e60d4,36569,1731992093944 with port=40385, startcode=1731992094027 2024-11-19T04:54:54,786 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T04:54:54,786 DEBUG [RS:0;08a7f35e60d4:40385 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T04:54:54,787 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=836407, jitterRate=0.06354719400405884}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T04:54:54,789 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731992094745Initializing all the Stores at 1731992094746 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992094746Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992094747 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992094747Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992094747Cleaning up temporary data from old regions at 1731992094779 (+32 ms)Region opened successfully at 1731992094789 (+10 ms) 2024-11-19T04:54:54,789 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, 
disabling compactions & flushes 2024-11-19T04:54:54,789 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T04:54:54,789 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T04:54:54,789 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T04:54:54,789 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T04:54:54,791 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40659, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T04:54:54,791 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36569 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 08a7f35e60d4,40385,1731992094027 2024-11-19T04:54:54,792 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36569 {}] master.ServerManager(517): Registering regionserver=08a7f35e60d4,40385,1731992094027 2024-11-19T04:54:54,794 DEBUG [RS:0;08a7f35e60d4:40385 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c 2024-11-19T04:54:54,794 DEBUG [RS:0;08a7f35e60d4:40385 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40649 2024-11-19T04:54:54,795 DEBUG [RS:0;08a7f35e60d4:40385 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T04:54:54,795 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T04:54:54,795 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731992094789Disabling compacts and flushes for region at 1731992094789Disabling writes for close at 1731992094789Writing region close event to WAL at 1731992094795 (+6 ms)Closed at 1731992094795 2024-11-19T04:54:54,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36569-0x1012e93ef6d0000, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T04:54:54,798 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T04:54:54,798 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T04:54:54,798 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T04:54:54,798 DEBUG [RS:0;08a7f35e60d4:40385 {}] zookeeper.ZKUtil(111): regionserver:40385-0x1012e93ef6d0001, quorum=127.0.0.1:58257, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/08a7f35e60d4,40385,1731992094027 2024-11-19T04:54:54,798 WARN [RS:0;08a7f35e60d4:40385 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-19T04:54:54,799 INFO [RS:0;08a7f35e60d4:40385 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T04:54:54,799 DEBUG [RS:0;08a7f35e60d4:40385 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/WALs/08a7f35e60d4,40385,1731992094027 2024-11-19T04:54:54,799 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [08a7f35e60d4,40385,1731992094027] 2024-11-19T04:54:54,800 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T04:54:54,802 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T04:54:54,814 INFO [RS:0;08a7f35e60d4:40385 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T04:54:54,820 INFO [RS:0;08a7f35e60d4:40385 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T04:54:54,825 INFO [RS:0;08a7f35e60d4:40385 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T04:54:54,825 INFO [RS:0;08a7f35e60d4:40385 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:54,828 INFO [RS:0;08a7f35e60d4:40385 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T04:54:54,830 INFO [RS:0;08a7f35e60d4:40385 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T04:54:54,830 INFO [RS:0;08a7f35e60d4:40385 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
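The MemStoreFlusher record above reports globalMemStoreLimit=880 M with a low-water mark of 836 M, and PressureAwareCompactionThroughputController reports 100 MB/s upper and 50 MB/s lower compaction throughput bounds. A hedged sketch of the configuration keys commonly behind those figures; the key names are taken from HBase defaults and should be checked against the version in use rather than treated as authoritative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class FlushAndCompactionTuning {
      public static Configuration configure() {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of the region server heap usable by all memstores; the 880 M / 836 M
        // values in the log are derived from this fraction and the lower-limit fraction.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        // Compaction throughput bounds (bytes/second) reported by the throughput controller.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        return conf;
      }
    }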
2024-11-19T04:54:54,830 DEBUG [RS:0;08a7f35e60d4:40385 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:54,830 DEBUG [RS:0;08a7f35e60d4:40385 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:54,830 DEBUG [RS:0;08a7f35e60d4:40385 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:54,830 DEBUG [RS:0;08a7f35e60d4:40385 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:54,830 DEBUG [RS:0;08a7f35e60d4:40385 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:54,830 DEBUG [RS:0;08a7f35e60d4:40385 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/08a7f35e60d4:0, corePoolSize=2, maxPoolSize=2 2024-11-19T04:54:54,831 DEBUG [RS:0;08a7f35e60d4:40385 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:54,831 DEBUG [RS:0;08a7f35e60d4:40385 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:54,831 DEBUG [RS:0;08a7f35e60d4:40385 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:54,831 DEBUG [RS:0;08a7f35e60d4:40385 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:54,831 DEBUG [RS:0;08a7f35e60d4:40385 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:54,831 DEBUG [RS:0;08a7f35e60d4:40385 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:54,831 DEBUG [RS:0;08a7f35e60d4:40385 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/08a7f35e60d4:0, corePoolSize=3, maxPoolSize=3 2024-11-19T04:54:54,831 DEBUG [RS:0;08a7f35e60d4:40385 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0, corePoolSize=3, maxPoolSize=3 2024-11-19T04:54:54,833 INFO [RS:0;08a7f35e60d4:40385 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:54,833 INFO [RS:0;08a7f35e60d4:40385 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:54,833 INFO [RS:0;08a7f35e60d4:40385 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:54,833 INFO [RS:0;08a7f35e60d4:40385 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-19T04:54:54,833 INFO [RS:0;08a7f35e60d4:40385 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:54,833 INFO [RS:0;08a7f35e60d4:40385 {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,40385,1731992094027-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T04:54:54,859 INFO [RS:0;08a7f35e60d4:40385 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T04:54:54,859 INFO [RS:0;08a7f35e60d4:40385 {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,40385,1731992094027-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:54,860 INFO [RS:0;08a7f35e60d4:40385 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:54,860 INFO [RS:0;08a7f35e60d4:40385 {}] regionserver.Replication(171): 08a7f35e60d4,40385,1731992094027 started 2024-11-19T04:54:54,884 INFO [RS:0;08a7f35e60d4:40385 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:54,884 INFO [RS:0;08a7f35e60d4:40385 {}] regionserver.HRegionServer(1482): Serving as 08a7f35e60d4,40385,1731992094027, RpcServer on 08a7f35e60d4/172.17.0.2:40385, sessionid=0x1012e93ef6d0001 2024-11-19T04:54:54,885 DEBUG [RS:0;08a7f35e60d4:40385 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T04:54:54,885 DEBUG [RS:0;08a7f35e60d4:40385 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 08a7f35e60d4,40385,1731992094027 2024-11-19T04:54:54,885 DEBUG [RS:0;08a7f35e60d4:40385 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '08a7f35e60d4,40385,1731992094027' 2024-11-19T04:54:54,885 DEBUG [RS:0;08a7f35e60d4:40385 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T04:54:54,886 DEBUG [RS:0;08a7f35e60d4:40385 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T04:54:54,886 DEBUG [RS:0;08a7f35e60d4:40385 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T04:54:54,886 DEBUG [RS:0;08a7f35e60d4:40385 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T04:54:54,887 DEBUG [RS:0;08a7f35e60d4:40385 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 08a7f35e60d4,40385,1731992094027 2024-11-19T04:54:54,887 DEBUG [RS:0;08a7f35e60d4:40385 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '08a7f35e60d4,40385,1731992094027' 2024-11-19T04:54:54,887 DEBUG [RS:0;08a7f35e60d4:40385 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T04:54:54,887 DEBUG [RS:0;08a7f35e60d4:40385 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T04:54:54,888 DEBUG [RS:0;08a7f35e60d4:40385 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T04:54:54,888 INFO [RS:0;08a7f35e60d4:40385 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T04:54:54,888 INFO [RS:0;08a7f35e60d4:40385 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-19T04:54:54,953 WARN [08a7f35e60d4:36569 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-19T04:54:54,991 INFO [RS:0;08a7f35e60d4:40385 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=08a7f35e60d4%2C40385%2C1731992094027, suffix=, logDir=hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/WALs/08a7f35e60d4,40385,1731992094027, archiveDir=hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/oldWALs, maxLogs=32 2024-11-19T04:54:54,993 INFO [RS:0;08a7f35e60d4:40385 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C40385%2C1731992094027.1731992094992 2024-11-19T04:54:55,002 INFO [RS:0;08a7f35e60d4:40385 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/WALs/08a7f35e60d4,40385,1731992094027/08a7f35e60d4%2C40385%2C1731992094027.1731992094992 2024-11-19T04:54:55,005 DEBUG [RS:0;08a7f35e60d4:40385 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44383:44383),(127.0.0.1/127.0.0.1:33829:33829)] 2024-11-19T04:54:55,203 DEBUG [08a7f35e60d4:36569 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T04:54:55,204 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=08a7f35e60d4,40385,1731992094027 2024-11-19T04:54:55,206 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 08a7f35e60d4,40385,1731992094027, state=OPENING 2024-11-19T04:54:55,208 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T04:54:55,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36569-0x1012e93ef6d0000, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:54:55,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40385-0x1012e93ef6d0001, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:54:55,210 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T04:54:55,210 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T04:54:55,210 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T04:54:55,210 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=08a7f35e60d4,40385,1731992094027}] 2024-11-19T04:54:55,364 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T04:54:55,367 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33563, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T04:54:55,373 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T04:54:55,373 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T04:54:55,376 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=08a7f35e60d4%2C40385%2C1731992094027.meta, suffix=.meta, logDir=hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/WALs/08a7f35e60d4,40385,1731992094027, archiveDir=hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/oldWALs, maxLogs=32 2024-11-19T04:54:55,379 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C40385%2C1731992094027.meta.1731992095379.meta 2024-11-19T04:54:55,397 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/WALs/08a7f35e60d4,40385,1731992094027/08a7f35e60d4%2C40385%2C1731992094027.meta.1731992095379.meta 2024-11-19T04:54:55,406 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33829:33829),(127.0.0.1/127.0.0.1:44383:44383)] 2024-11-19T04:54:55,408 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T04:54:55,408 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T04:54:55,408 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T04:54:55,408 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-19T04:54:55,408 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T04:54:55,409 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:54:55,409 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T04:54:55,409 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T04:54:55,411 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T04:54:55,413 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T04:54:55,413 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:54:55,413 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:54:55,414 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T04:54:55,415 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T04:54:55,415 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:54:55,415 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:54:55,415 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T04:54:55,416 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T04:54:55,417 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:54:55,417 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:54:55,417 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T04:54:55,418 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T04:54:55,418 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:54:55,419 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
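The store-opener records above echo the hbase:meta column-family attributes (VERSIONS => '3', BLOOMFILTER => 'ROWCOL', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', IN_MEMORY => 'true', BLOCKSIZE => 8 KB, TTL => 'FOREVER'). For reference, a sketch of how an equivalent family could be declared with the public client API; the values are copied from the log, but this is illustrative and not how InitMetaProcedure builds the descriptor internally:

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class MetaLikeFamilyExample {
      public static ColumnFamilyDescriptor infoFamily() {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                               // BLOCKSIZE => 8 KB
            .setTimeToLive(HConstants.FOREVER)                    // TTL => 'FOREVER'
            .build();
      }
    }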
2024-11-19T04:54:55,419 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T04:54:55,420 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/data/hbase/meta/1588230740 2024-11-19T04:54:55,422 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/data/hbase/meta/1588230740 2024-11-19T04:54:55,423 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T04:54:55,423 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T04:54:55,424 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T04:54:55,426 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T04:54:55,427 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=725849, jitterRate=-0.07703635096549988}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T04:54:55,428 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T04:54:55,429 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731992095409Writing region info on filesystem at 1731992095409Initializing all the Stores at 1731992095410 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992095410Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992095411 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992095411Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992095411Cleaning up temporary data from old regions at 1731992095423 (+12 ms)Running coprocessor post-open hooks at 1731992095428 (+5 ms)Region opened successfully at 1731992095429 (+1 ms) 2024-11-19T04:54:55,431 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731992095364 2024-11-19T04:54:55,434 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T04:54:55,434 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T04:54:55,435 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=08a7f35e60d4,40385,1731992094027 2024-11-19T04:54:55,436 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 08a7f35e60d4,40385,1731992094027, state=OPEN 2024-11-19T04:54:55,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40385-0x1012e93ef6d0001, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T04:54:55,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36569-0x1012e93ef6d0000, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T04:54:55,447 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=08a7f35e60d4,40385,1731992094027 2024-11-19T04:54:55,447 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T04:54:55,447 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T04:54:55,450 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T04:54:55,451 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=08a7f35e60d4,40385,1731992094027 in 237 msec 2024-11-19T04:54:55,454 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T04:54:55,454 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 652 msec 2024-11-19T04:54:55,455 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T04:54:55,455 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T04:54:55,457 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T04:54:55,457 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=08a7f35e60d4,40385,1731992094027, seqNum=-1] 2024-11-19T04:54:55,457 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T04:54:55,458 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36163, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T04:54:55,466 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 771 msec 2024-11-19T04:54:55,466 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731992095466, completionTime=-1 2024-11-19T04:54:55,466 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T04:54:55,466 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-19T04:54:55,468 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-19T04:54:55,468 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731992155468 2024-11-19T04:54:55,468 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731992215468 2024-11-19T04:54:55,468 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-19T04:54:55,468 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,36569,1731992093944-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:55,469 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,36569,1731992093944-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:55,469 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,36569,1731992093944-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:55,469 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-08a7f35e60d4:36569, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T04:54:55,469 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:55,469 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:55,471 DEBUG [master/08a7f35e60d4:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T04:54:55,474 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.404sec 2024-11-19T04:54:55,474 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T04:54:55,474 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T04:54:55,474 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T04:54:55,474 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-19T04:54:55,474 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T04:54:55,474 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,36569,1731992093944-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T04:54:55,474 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,36569,1731992093944-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T04:54:55,477 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T04:54:55,477 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T04:54:55,477 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,36569,1731992093944-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
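The "Chore ScheduledChore name=... is enabled." records above come from HBase's ChoreService scheduling periodic background tasks on the master. As a hedged sketch of the same pattern (the chore name and period here are invented for illustration), a custom chore can be defined and scheduled like this:

// Hedged sketch: schedule a trivial periodic chore, mirroring the ChoreService records above.
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("sketch");               // thread-name prefix
    ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("chore tick");                            // periodic work goes here
      }
    };
    service.scheduleChore(chore);   // ChoreService logs "Chore ScheduledChore name=... is enabled."
    Thread.sleep(3_000);
    stopper.stop("done");
    service.shutdown();
  }
}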
2024-11-19T04:54:55,553 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2fa3cb60, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T04:54:55,553 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 08a7f35e60d4,36569,-1 for getting cluster id 2024-11-19T04:54:55,553 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T04:54:55,555 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e90c6ebb-046b-46fd-b7fd-45e542f7a9b8' 2024-11-19T04:54:55,556 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T04:54:55,556 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e90c6ebb-046b-46fd-b7fd-45e542f7a9b8" 2024-11-19T04:54:55,557 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@111b08da, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T04:54:55,557 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [08a7f35e60d4,36569,-1] 2024-11-19T04:54:55,557 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T04:54:55,558 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:54:55,559 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46632, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T04:54:55,560 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f14acfa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T04:54:55,561 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T04:54:55,562 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=08a7f35e60d4,40385,1731992094027, seqNum=-1] 2024-11-19T04:54:55,563 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T04:54:55,565 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56550, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T04:54:55,567 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=08a7f35e60d4,36569,1731992093944 2024-11-19T04:54:55,567 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:54:55,571 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T04:54:55,571 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T04:54:55,571 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-19T04:54:55,572 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T04:54:55,572 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:54:55,572 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:54:55,572 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-19T04:54:55,572 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T04:54:55,572 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1712408714, stopped=false 2024-11-19T04:54:55,572 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=08a7f35e60d4,36569,1731992093944 2024-11-19T04:54:55,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36569-0x1012e93ef6d0000, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T04:54:55,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40385-0x1012e93ef6d0001, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T04:54:55,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36569-0x1012e93ef6d0000, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:54:55,576 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T04:54:55,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40385-0x1012e93ef6d0001, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:54:55,576 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
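The call stacks above show the tear-down path for this run: AbstractTestLogRolling.tearDown invokes HBaseTestingUtil.shutdownMiniCluster, which closes the shared connection and shuts the cluster down. A hedged sketch of that lifecycle in a JUnit test follows; the class, field, and test-method names are illustrative, and only startMiniCluster()/shutdownMiniCluster() are taken from the stack traces and startup records in this log.

// Hedged sketch of the start/stop lifecycle visible in the stack traces above.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    TEST_UTIL.startMiniCluster();      // DFS, ZooKeeper, master and region server, as logged above
  }

  @Test
  public void testSomething() {
    // test body elided
  }

  @After
  public void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();   // triggers the "Shutting down minicluster" sequence
  }
}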
2024-11-19T04:54:55,576 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T04:54:55,576 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:54:55,576 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36569-0x1012e93ef6d0000, quorum=127.0.0.1:58257, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T04:54:55,577 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '08a7f35e60d4,40385,1731992094027' ***** 2024-11-19T04:54:55,577 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T04:54:55,577 INFO [RS:0;08a7f35e60d4:40385 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T04:54:55,577 INFO [RS:0;08a7f35e60d4:40385 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T04:54:55,577 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T04:54:55,577 INFO [RS:0;08a7f35e60d4:40385 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T04:54:55,577 INFO [RS:0;08a7f35e60d4:40385 {}] regionserver.HRegionServer(959): stopping server 08a7f35e60d4,40385,1731992094027 2024-11-19T04:54:55,577 INFO [RS:0;08a7f35e60d4:40385 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T04:54:55,577 INFO [RS:0;08a7f35e60d4:40385 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;08a7f35e60d4:40385. 2024-11-19T04:54:55,577 DEBUG [RS:0;08a7f35e60d4:40385 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T04:54:55,578 DEBUG [RS:0;08a7f35e60d4:40385 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:54:55,578 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40385-0x1012e93ef6d0001, quorum=127.0.0.1:58257, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T04:54:55,578 INFO [RS:0;08a7f35e60d4:40385 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
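The next records show the region server flushing hbase:meta's memstore ("Flushing 1588230740 4/4 column families") as the region closes. A test can request the same kind of flush explicitly; the sketch below is a hedged illustration using the standard Admin API, with HBaseConfiguration.create() assumed to point at the running cluster.

// Hedged sketch: request a flush of hbase:meta, the operation the following records perform on close.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MetaFlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // assumed to resolve the (mini)cluster
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.META_TABLE_NAME);           // write memstore contents out to new HFiles
    }
  }
}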
2024-11-19T04:54:55,578 INFO [RS:0;08a7f35e60d4:40385 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T04:54:55,578 INFO [RS:0;08a7f35e60d4:40385 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-19T04:54:55,578 INFO [RS:0;08a7f35e60d4:40385 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T04:54:55,578 INFO [RS:0;08a7f35e60d4:40385 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-19T04:54:55,578 DEBUG [RS:0;08a7f35e60d4:40385 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-19T04:54:55,578 DEBUG [RS:0;08a7f35e60d4:40385 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-19T04:54:55,578 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T04:54:55,579 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T04:54:55,579 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T04:54:55,579 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T04:54:55,579 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T04:54:55,579 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-19T04:54:55,599 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/data/hbase/meta/1588230740/.tmp/ns/9f93f4e37f1d446a94018fa4bf1059fe is 43, key is default/ns:d/1731992095459/Put/seqid=0 2024-11-19T04:54:55,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35465 is added to blk_1073741835_1011 (size=5153) 2024-11-19T04:54:55,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37507 is added to blk_1073741835_1011 (size=5153) 2024-11-19T04:54:55,779 DEBUG [RS:0;08a7f35e60d4:40385 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-19T04:54:55,840 INFO [regionserver/08a7f35e60d4:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-19T04:54:55,840 INFO [regionserver/08a7f35e60d4:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-19T04:54:55,979 DEBUG [RS:0;08a7f35e60d4:40385 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-19T04:54:56,011 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/data/hbase/meta/1588230740/.tmp/ns/9f93f4e37f1d446a94018fa4bf1059fe 2024-11-19T04:54:56,019 DEBUG 
[RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/data/hbase/meta/1588230740/.tmp/ns/9f93f4e37f1d446a94018fa4bf1059fe as hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/data/hbase/meta/1588230740/ns/9f93f4e37f1d446a94018fa4bf1059fe 2024-11-19T04:54:56,027 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/data/hbase/meta/1588230740/ns/9f93f4e37f1d446a94018fa4bf1059fe, entries=2, sequenceid=6, filesize=5.0 K 2024-11-19T04:54:56,029 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 450ms, sequenceid=6, compaction requested=false 2024-11-19T04:54:56,029 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-19T04:54:56,036 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-19T04:54:56,037 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T04:54:56,037 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T04:54:56,038 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731992095578Running coprocessor pre-close hooks at 1731992095578Disabling compacts and flushes for region at 1731992095578Disabling writes for close at 1731992095579 (+1 ms)Obtaining lock to block concurrent updates at 1731992095579Preparing flush snapshotting stores in 1588230740 at 1731992095579Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731992095580 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731992095581 (+1 ms)Flushing 1588230740/ns: creating writer at 1731992095581Flushing 1588230740/ns: appending metadata at 1731992095598 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1731992095598Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a594ce0: reopening flushed file at 1731992096018 (+420 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 450ms, sequenceid=6, compaction requested=false at 1731992096029 (+11 ms)Writing region close event to WAL at 1731992096031 (+2 ms)Running coprocessor post-close hooks at 1731992096037 (+6 ms)Closed at 1731992096037 2024-11-19T04:54:56,038 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-19T04:54:56,179 INFO [RS:0;08a7f35e60d4:40385 {}] 
regionserver.HRegionServer(976): stopping server 08a7f35e60d4,40385,1731992094027; all regions closed. 2024-11-19T04:54:56,180 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:56,180 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:56,180 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:56,180 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:56,181 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:56,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35465 is added to blk_1073741834_1010 (size=1152) 2024-11-19T04:54:56,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37507 is added to blk_1073741834_1010 (size=1152) 2024-11-19T04:54:56,187 DEBUG [RS:0;08a7f35e60d4:40385 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/oldWALs 2024-11-19T04:54:56,187 INFO [RS:0;08a7f35e60d4:40385 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 08a7f35e60d4%2C40385%2C1731992094027.meta:.meta(num 1731992095379) 2024-11-19T04:54:56,187 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:56,188 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:56,188 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:56,188 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:56,188 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:56,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37507 is added to blk_1073741833_1009 (size=93) 2024-11-19T04:54:56,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35465 is added to blk_1073741833_1009 (size=93) 2024-11-19T04:54:56,194 DEBUG [RS:0;08a7f35e60d4:40385 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/oldWALs 2024-11-19T04:54:56,194 INFO [RS:0;08a7f35e60d4:40385 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 08a7f35e60d4%2C40385%2C1731992094027:(num 1731992094992) 2024-11-19T04:54:56,194 DEBUG [RS:0;08a7f35e60d4:40385 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:54:56,194 INFO [RS:0;08a7f35e60d4:40385 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T04:54:56,194 INFO [RS:0;08a7f35e60d4:40385 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T04:54:56,194 INFO [RS:0;08a7f35e60d4:40385 {}] hbase.ChoreService(370): Chore service for: regionserver/08a7f35e60d4:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-19T04:54:56,195 INFO [RS:0;08a7f35e60d4:40385 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T04:54:56,195 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
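The records above show the region server's WALs being closed and archived ("Moved 1 WAL file(s) to .../oldWALs"), the normal end of life for an FSHLog. Since the stack traces name TestLogRolling.testLogRollOnDatanodeDeath, it is worth noting that a test can also force a roll on demand; the sketch below is a hedged illustration using Admin.rollWALWriter, and it assumes a client Configuration that resolves the running cluster.

// Hedged sketch: force every live region server to roll its WAL.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class WalRollSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      for (ServerName rs : admin.getRegionServers()) {   // every live region server
        admin.rollWALWriter(rs);                         // close the current WAL and start a new one
      }
    }
  }
}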
2024-11-19T04:54:56,195 INFO [RS:0;08a7f35e60d4:40385 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40385 2024-11-19T04:54:56,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40385-0x1012e93ef6d0001, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/08a7f35e60d4,40385,1731992094027 2024-11-19T04:54:56,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36569-0x1012e93ef6d0000, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T04:54:56,197 INFO [RS:0;08a7f35e60d4:40385 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T04:54:56,199 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [08a7f35e60d4,40385,1731992094027] 2024-11-19T04:54:56,200 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/08a7f35e60d4,40385,1731992094027 already deleted, retry=false 2024-11-19T04:54:56,200 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 08a7f35e60d4,40385,1731992094027 expired; onlineServers=0 2024-11-19T04:54:56,200 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '08a7f35e60d4,36569,1731992093944' ***** 2024-11-19T04:54:56,200 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T04:54:56,201 INFO [M:0;08a7f35e60d4:36569 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T04:54:56,201 INFO [M:0;08a7f35e60d4:36569 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T04:54:56,201 DEBUG [M:0;08a7f35e60d4:36569 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T04:54:56,201 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-19T04:54:56,201 DEBUG [M:0;08a7f35e60d4:36569 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T04:54:56,201 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.large.0-1731992094700 {}] cleaner.HFileCleaner(306): Exit Thread[master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.large.0-1731992094700,5,FailOnTimeoutGroup] 2024-11-19T04:54:56,201 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.small.0-1731992094700 {}] cleaner.HFileCleaner(306): Exit Thread[master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.small.0-1731992094700,5,FailOnTimeoutGroup] 2024-11-19T04:54:56,201 INFO [M:0;08a7f35e60d4:36569 {}] hbase.ChoreService(370): Chore service for: master/08a7f35e60d4:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T04:54:56,202 INFO [M:0;08a7f35e60d4:36569 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T04:54:56,202 DEBUG [M:0;08a7f35e60d4:36569 {}] master.HMaster(1795): Stopping service threads 2024-11-19T04:54:56,202 INFO [M:0;08a7f35e60d4:36569 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T04:54:56,202 INFO [M:0;08a7f35e60d4:36569 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T04:54:56,202 INFO [M:0;08a7f35e60d4:36569 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T04:54:56,202 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-19T04:54:56,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36569-0x1012e93ef6d0000, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T04:54:56,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36569-0x1012e93ef6d0000, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:54:56,204 DEBUG [M:0;08a7f35e60d4:36569 {}] zookeeper.ZKUtil(347): master:36569-0x1012e93ef6d0000, quorum=127.0.0.1:58257, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T04:54:56,204 WARN [M:0;08a7f35e60d4:36569 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T04:54:56,205 INFO [M:0;08a7f35e60d4:36569 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/.lastflushedseqids 2024-11-19T04:54:56,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37507 is added to blk_1073741836_1012 (size=99) 2024-11-19T04:54:56,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35465 is added to blk_1073741836_1012 (size=99) 2024-11-19T04:54:56,217 INFO [M:0;08a7f35e60d4:36569 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T04:54:56,217 INFO [M:0;08a7f35e60d4:36569 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T04:54:56,217 DEBUG [M:0;08a7f35e60d4:36569 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T04:54:56,217 INFO [M:0;08a7f35e60d4:36569 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:54:56,218 DEBUG [M:0;08a7f35e60d4:36569 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:54:56,218 DEBUG [M:0;08a7f35e60d4:36569 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T04:54:56,218 DEBUG [M:0;08a7f35e60d4:36569 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:54:56,218 INFO [M:0;08a7f35e60d4:36569 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-19T04:54:56,245 DEBUG [M:0;08a7f35e60d4:36569 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a909e6af1aa04eafbe0985163c25814e is 82, key is hbase:meta,,1/info:regioninfo/1731992095435/Put/seqid=0 2024-11-19T04:54:56,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37507 is added to blk_1073741837_1013 (size=5672) 2024-11-19T04:54:56,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35465 is added to blk_1073741837_1013 (size=5672) 2024-11-19T04:54:56,258 INFO [M:0;08a7f35e60d4:36569 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a909e6af1aa04eafbe0985163c25814e 2024-11-19T04:54:56,284 DEBUG [M:0;08a7f35e60d4:36569 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/34075ddffaf14ec9be6edba65fbeb78e is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731992095465/Put/seqid=0 2024-11-19T04:54:56,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35465 is added to blk_1073741838_1014 (size=5275) 2024-11-19T04:54:56,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37507 is added to blk_1073741838_1014 (size=5275) 2024-11-19T04:54:56,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40385-0x1012e93ef6d0001, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T04:54:56,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40385-0x1012e93ef6d0001, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T04:54:56,299 INFO [RS:0;08a7f35e60d4:40385 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T04:54:56,299 INFO [RS:0;08a7f35e60d4:40385 {}] regionserver.HRegionServer(1031): Exiting; stopping=08a7f35e60d4,40385,1731992094027; zookeeper connection closed. 
2024-11-19T04:54:56,300 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@29b73268 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@29b73268 2024-11-19T04:54:56,300 INFO [M:0;08a7f35e60d4:36569 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/34075ddffaf14ec9be6edba65fbeb78e 2024-11-19T04:54:56,300 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-19T04:54:56,330 DEBUG [M:0;08a7f35e60d4:36569 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/76c0db9fea9f4ac2b8ed3d9c9fa85d80 is 69, key is 08a7f35e60d4,40385,1731992094027/rs:state/1731992094792/Put/seqid=0 2024-11-19T04:54:56,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35465 is added to blk_1073741839_1015 (size=5156) 2024-11-19T04:54:56,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37507 is added to blk_1073741839_1015 (size=5156) 2024-11-19T04:54:56,339 INFO [M:0;08a7f35e60d4:36569 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/76c0db9fea9f4ac2b8ed3d9c9fa85d80 2024-11-19T04:54:56,364 DEBUG [M:0;08a7f35e60d4:36569 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4f358fdf9dea44828c9558a473f8cf53 is 52, key is load_balancer_on/state:d/1731992095569/Put/seqid=0 2024-11-19T04:54:56,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35465 is added to blk_1073741840_1016 (size=5056) 2024-11-19T04:54:56,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37507 is added to blk_1073741840_1016 (size=5056) 2024-11-19T04:54:56,776 INFO [M:0;08a7f35e60d4:36569 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4f358fdf9dea44828c9558a473f8cf53 2024-11-19T04:54:56,783 DEBUG [M:0;08a7f35e60d4:36569 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a909e6af1aa04eafbe0985163c25814e as hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a909e6af1aa04eafbe0985163c25814e 2024-11-19T04:54:56,789 INFO [M:0;08a7f35e60d4:36569 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a909e6af1aa04eafbe0985163c25814e, entries=8, sequenceid=29, filesize=5.5 K 2024-11-19T04:54:56,790 DEBUG [M:0;08a7f35e60d4:36569 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/34075ddffaf14ec9be6edba65fbeb78e as hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/34075ddffaf14ec9be6edba65fbeb78e 2024-11-19T04:54:56,796 INFO [M:0;08a7f35e60d4:36569 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/34075ddffaf14ec9be6edba65fbeb78e, entries=3, sequenceid=29, filesize=5.2 K 2024-11-19T04:54:56,798 DEBUG [M:0;08a7f35e60d4:36569 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/76c0db9fea9f4ac2b8ed3d9c9fa85d80 as hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/76c0db9fea9f4ac2b8ed3d9c9fa85d80 2024-11-19T04:54:56,804 INFO [M:0;08a7f35e60d4:36569 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/76c0db9fea9f4ac2b8ed3d9c9fa85d80, entries=1, sequenceid=29, filesize=5.0 K 2024-11-19T04:54:56,805 DEBUG [M:0;08a7f35e60d4:36569 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4f358fdf9dea44828c9558a473f8cf53 as hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4f358fdf9dea44828c9558a473f8cf53 2024-11-19T04:54:56,811 INFO [M:0;08a7f35e60d4:36569 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40649/user/jenkins/test-data/f82d5b18-76f7-4fe4-46d5-42c10f7a5d0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4f358fdf9dea44828c9558a473f8cf53, entries=1, sequenceid=29, filesize=4.9 K 2024-11-19T04:54:56,812 INFO [M:0;08a7f35e60d4:36569 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 594ms, sequenceid=29, compaction requested=false 2024-11-19T04:54:56,814 INFO [M:0;08a7f35e60d4:36569 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T04:54:56,814 DEBUG [M:0;08a7f35e60d4:36569 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731992096217Disabling compacts and flushes for region at 1731992096217Disabling writes for close at 1731992096218 (+1 ms)Obtaining lock to block concurrent updates at 1731992096218Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731992096218Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731992096219 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731992096220 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731992096220Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731992096244 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731992096244Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731992096265 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731992096283 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731992096283Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731992096307 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731992096330 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731992096330Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731992096346 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731992096364 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731992096364Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7a9ab86d: reopening flushed file at 1731992096782 (+418 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@492fe6ed: reopening flushed file at 1731992096790 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@30bf2c5e: reopening flushed file at 1731992096797 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7d8baad5: reopening flushed file at 1731992096804 (+7 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 594ms, sequenceid=29, compaction requested=false at 1731992096812 (+8 ms)Writing region close event to WAL at 1731992096814 (+2 ms)Closed at 1731992096814 2024-11-19T04:54:56,815 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:56,815 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:56,815 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:56,815 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:56,815 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:54:56,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35465 is added to blk_1073741830_1006 (size=10311) 2024-11-19T04:54:56,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37507 is added to blk_1073741830_1006 (size=10311) 2024-11-19T04:54:56,819 INFO [M:0;08a7f35e60d4:36569 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-19T04:54:56,819 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T04:54:56,819 INFO [M:0;08a7f35e60d4:36569 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36569 2024-11-19T04:54:56,820 INFO [M:0;08a7f35e60d4:36569 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T04:54:56,834 INFO [regionserver/08a7f35e60d4:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T04:54:56,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36569-0x1012e93ef6d0000, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T04:54:56,922 INFO [M:0;08a7f35e60d4:36569 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T04:54:56,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36569-0x1012e93ef6d0000, quorum=127.0.0.1:58257, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T04:54:56,924 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2d183c93{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:54:56,925 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7154ca22{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T04:54:56,925 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T04:54:56,925 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b44e274{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T04:54:56,925 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c1be80f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/hadoop.log.dir/,STOPPED} 2024-11-19T04:54:56,926 WARN [BP-1756748749-172.17.0.2-1731992093165 heartbeating to localhost/127.0.0.1:40649 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T04:54:56,926 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T04:54:56,926 WARN [BP-1756748749-172.17.0.2-1731992093165 heartbeating to localhost/127.0.0.1:40649 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1756748749-172.17.0.2-1731992093165 (Datanode Uuid fb326dc8-058d-4e47-8b6a-9d08fe7ff095) service to localhost/127.0.0.1:40649 2024-11-19T04:54:56,926 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T04:54:56,927 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/cluster_ba6f92a7-d1ac-1f96-b390-ba7e329039c7/data/data3/current/BP-1756748749-172.17.0.2-1731992093165 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:54:56,927 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/cluster_ba6f92a7-d1ac-1f96-b390-ba7e329039c7/data/data4/current/BP-1756748749-172.17.0.2-1731992093165 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:54:56,927 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T04:54:56,930 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@542ee468{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:54:56,930 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@75c88313{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T04:54:56,930 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T04:54:56,930 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4edee9ab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T04:54:56,930 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@eab7acc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/hadoop.log.dir/,STOPPED} 2024-11-19T04:54:56,931 WARN [BP-1756748749-172.17.0.2-1731992093165 heartbeating to localhost/127.0.0.1:40649 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T04:54:56,931 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
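A few records below, the harness starts a second minicluster with StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, ...}. As a hedged sketch of how such an option object is typically built and passed to HBaseTestingUtil (exact builder overloads may differ between HBase versions), see the following; the class name and test body are placeholders.

// Hedged sketch: build the StartMiniClusterOption printed in the restart records below.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class RestartSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)          // values mirror the option string logged below
        .numRegionServers(1)
        .numDataNodes(2)
        .build();
    util.startMiniCluster(option);
    try {
      // test body would go here
    } finally {
      util.shutdownMiniCluster();
    }
  }
}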
2024-11-19T04:54:56,932 WARN [BP-1756748749-172.17.0.2-1731992093165 heartbeating to localhost/127.0.0.1:40649 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1756748749-172.17.0.2-1731992093165 (Datanode Uuid a3d3c148-353a-48a3-8df1-ecf37c5e5443) service to localhost/127.0.0.1:40649 2024-11-19T04:54:56,932 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T04:54:56,932 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/cluster_ba6f92a7-d1ac-1f96-b390-ba7e329039c7/data/data1/current/BP-1756748749-172.17.0.2-1731992093165 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:54:56,932 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/cluster_ba6f92a7-d1ac-1f96-b390-ba7e329039c7/data/data2/current/BP-1756748749-172.17.0.2-1731992093165 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:54:56,933 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T04:54:56,939 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@49a88a00{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T04:54:56,939 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4e4256d4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T04:54:56,939 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T04:54:56,939 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3197ca45{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T04:54:56,940 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f681677{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/hadoop.log.dir/,STOPPED} 2024-11-19T04:54:56,946 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T04:54:56,962 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T04:54:56,962 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T04:54:56,962 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/hadoop.log.dir so I do NOT create it in target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50 2024-11-19T04:54:56,962 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b1093eb7-96b9-17ee-3778-97521f906159/hadoop.tmp.dir so I do NOT create it in target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50 2024-11-19T04:54:56,962 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5, deleteOnExit=true 2024-11-19T04:54:56,962 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T04:54:56,962 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/test.cache.data in system properties and HBase conf 2024-11-19T04:54:56,962 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T04:54:56,963 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/hadoop.log.dir in system properties and HBase conf 2024-11-19T04:54:56,963 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T04:54:56,963 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T04:54:56,963 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T04:54:56,963 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-19T04:54:56,963 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T04:54:56,963 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T04:54:56,963 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T04:54:56,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T04:54:56,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T04:54:56,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T04:54:56,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T04:54:56,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T04:54:56,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T04:54:56,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/nfs.dump.dir in system properties and HBase conf 2024-11-19T04:54:56,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/java.io.tmpdir in system properties and HBase conf 2024-11-19T04:54:56,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T04:54:56,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T04:54:56,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T04:54:56,979 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T04:54:57,080 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:54:57,086 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T04:54:57,090 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T04:54:57,090 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T04:54:57,090 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T04:54:57,091 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:54:57,091 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@17c48ca{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/hadoop.log.dir/,AVAILABLE} 2024-11-19T04:54:57,092 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35c95cb4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T04:54:57,146 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T04:54:57,147 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T04:54:57,148 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-19T04:54:57,148 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-19T04:54:57,212 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5519c514{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/java.io.tmpdir/jetty-localhost-46867-hadoop-hdfs-3_4_1-tests_jar-_-any-15156541085270000377/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T04:54:57,212 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@47f2ada2{HTTP/1.1, (http/1.1)}{localhost:46867} 2024-11-19T04:54:57,213 INFO [Time-limited test {}] server.Server(415): Started @105257ms 2024-11-19T04:54:57,228 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T04:54:57,318 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:54:57,324 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T04:54:57,325 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T04:54:57,325 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T04:54:57,325 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T04:54:57,326 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c3d2a60{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/hadoop.log.dir/,AVAILABLE} 2024-11-19T04:54:57,326 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5917cb43{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T04:54:57,448 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1aa07d80{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/java.io.tmpdir/jetty-localhost-32797-hadoop-hdfs-3_4_1-tests_jar-_-any-13757148855708121515/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:54:57,448 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7c814f59{HTTP/1.1, (http/1.1)}{localhost:32797} 2024-11-19T04:54:57,448 INFO [Time-limited test {}] server.Server(415): Started @105493ms 2024-11-19T04:54:57,451 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T04:54:57,533 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:54:57,540 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T04:54:57,544 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T04:54:57,544 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T04:54:57,545 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T04:54:57,549 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@198c3788{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/hadoop.log.dir/,AVAILABLE} 2024-11-19T04:54:57,549 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6471b09b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T04:54:57,560 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:54:57,567 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:54:57,618 WARN [Thread-656 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data1/current/BP-39784048-172.17.0.2-1731992096997/current, will proceed with Du for space computation calculation, 2024-11-19T04:54:57,619 WARN [Thread-657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data2/current/BP-39784048-172.17.0.2-1731992096997/current, will proceed with Du for space computation calculation, 2024-11-19T04:54:57,663 WARN [Thread-635 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T04:54:57,677 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa6a5fcb5401bfc54 with lease ID 0xb3ec429c80b48453: Processing first storage report for DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9 from datanode DatanodeRegistration(127.0.0.1:43353, datanodeUuid=510b84df-c9b5-4629-b343-b6c817b1ee7c, infoPort=43361, infoSecurePort=0, ipcPort=39473, storageInfo=lv=-57;cid=testClusterID;nsid=85150078;c=1731992096997) 2024-11-19T04:54:57,677 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa6a5fcb5401bfc54 with lease ID 0xb3ec429c80b48453: from storage DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9 node DatanodeRegistration(127.0.0.1:43353, datanodeUuid=510b84df-c9b5-4629-b343-b6c817b1ee7c, infoPort=43361, infoSecurePort=0, ipcPort=39473, storageInfo=lv=-57;cid=testClusterID;nsid=85150078;c=1731992096997), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:54:57,677 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa6a5fcb5401bfc54 with lease ID 0xb3ec429c80b48453: Processing first storage report for DS-ca3928a6-4a19-4349-a2b3-1faaaabeec2b from datanode DatanodeRegistration(127.0.0.1:43353, datanodeUuid=510b84df-c9b5-4629-b343-b6c817b1ee7c, infoPort=43361, infoSecurePort=0, ipcPort=39473, storageInfo=lv=-57;cid=testClusterID;nsid=85150078;c=1731992096997) 2024-11-19T04:54:57,677 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa6a5fcb5401bfc54 with lease ID 0xb3ec429c80b48453: from storage DS-ca3928a6-4a19-4349-a2b3-1faaaabeec2b node DatanodeRegistration(127.0.0.1:43353, datanodeUuid=510b84df-c9b5-4629-b343-b6c817b1ee7c, infoPort=43361, infoSecurePort=0, ipcPort=39473, storageInfo=lv=-57;cid=testClusterID;nsid=85150078;c=1731992096997), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:54:57,698 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@55c8142a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/java.io.tmpdir/jetty-localhost-35713-hadoop-hdfs-3_4_1-tests_jar-_-any-4933233571152111822/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:54:57,698 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@65f2c48f{HTTP/1.1, (http/1.1)}{localhost:35713} 2024-11-19T04:54:57,698 INFO [Time-limited test {}] server.Server(415): Started @105743ms 2024-11-19T04:54:57,700 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
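The "Unable to initialize FileSignerSecretProvider" warnings that recur above are benign in this run: no secret file exists at /home/jenkins/hadoop-http-auth-signature-secret, so each embedded HTTP server falls back to a random signing secret. If a test did want a fixed secret, the standard Hadoop property shown below is the knob; the path used here is purely illustrative.

import org.apache.hadoop.conf.Configuration;

public class HttpAuthSecretSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Point Hadoop's HTTP authentication filter at an existing secret file
    // instead of letting it fall back to random secrets (hypothetical path).
    conf.set("hadoop.http.authentication.signature.secret.file",
        "/tmp/http-auth-signature-secret");
  }
}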
2024-11-19T04:54:57,834 WARN [Thread-682 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data3/current/BP-39784048-172.17.0.2-1731992096997/current, will proceed with Du for space computation calculation, 2024-11-19T04:54:57,835 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data4/current/BP-39784048-172.17.0.2-1731992096997/current, will proceed with Du for space computation calculation, 2024-11-19T04:54:57,861 WARN [Thread-671 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T04:54:57,865 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2d0294d85faad263 with lease ID 0xb3ec429c80b48454: Processing first storage report for DS-937871f2-4c41-459f-abfb-7cc01ab45ce0 from datanode DatanodeRegistration(127.0.0.1:38875, datanodeUuid=85b1f523-508c-4d33-9736-eb00eb9ff733, infoPort=43029, infoSecurePort=0, ipcPort=45187, storageInfo=lv=-57;cid=testClusterID;nsid=85150078;c=1731992096997) 2024-11-19T04:54:57,865 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2d0294d85faad263 with lease ID 0xb3ec429c80b48454: from storage DS-937871f2-4c41-459f-abfb-7cc01ab45ce0 node DatanodeRegistration(127.0.0.1:38875, datanodeUuid=85b1f523-508c-4d33-9736-eb00eb9ff733, infoPort=43029, infoSecurePort=0, ipcPort=45187, storageInfo=lv=-57;cid=testClusterID;nsid=85150078;c=1731992096997), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:54:57,865 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2d0294d85faad263 with lease ID 0xb3ec429c80b48454: Processing first storage report for DS-dd2f7d9a-e4bb-46ef-b82c-e580068999d8 from datanode DatanodeRegistration(127.0.0.1:38875, datanodeUuid=85b1f523-508c-4d33-9736-eb00eb9ff733, infoPort=43029, infoSecurePort=0, ipcPort=45187, storageInfo=lv=-57;cid=testClusterID;nsid=85150078;c=1731992096997) 2024-11-19T04:54:57,865 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2d0294d85faad263 with lease ID 0xb3ec429c80b48454: from storage DS-dd2f7d9a-e4bb-46ef-b82c-e580068999d8 node DatanodeRegistration(127.0.0.1:38875, datanodeUuid=85b1f523-508c-4d33-9736-eb00eb9ff733, infoPort=43029, infoSecurePort=0, ipcPort=45187, storageInfo=lv=-57;cid=testClusterID;nsid=85150078;c=1731992096997), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:54:57,965 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50 2024-11-19T04:54:57,968 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/zookeeper_0, clientPort=50716, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T04:54:57,969 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50716 2024-11-19T04:54:57,969 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:54:57,971 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:54:57,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38875 is added to blk_1073741825_1001 (size=7) 2024-11-19T04:54:57,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43353 is added to blk_1073741825_1001 (size=7) 2024-11-19T04:54:57,982 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7 with version=8 2024-11-19T04:54:57,983 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/hbase-staging 2024-11-19T04:54:57,985 INFO [Time-limited test {}] client.ConnectionUtils(128): master/08a7f35e60d4:0 server-side Connection retries=45 2024-11-19T04:54:57,985 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T04:54:57,985 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T04:54:57,985 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T04:54:57,985 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T04:54:57,985 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T04:54:57,985 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T04:54:57,986 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T04:54:57,986 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45139 2024-11-19T04:54:57,988 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45139 connecting to ZooKeeper ensemble=127.0.0.1:50716 2024-11-19T04:54:57,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:451390x0, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T04:54:57,994 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45139-0x1012e93ff3f0000 connected 2024-11-19T04:54:58,020 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:54:58,023 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:54:58,027 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45139-0x1012e93ff3f0000, quorum=127.0.0.1:50716, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T04:54:58,027 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7, hbase.cluster.distributed=false 2024-11-19T04:54:58,030 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45139-0x1012e93ff3f0000, quorum=127.0.0.1:50716, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T04:54:58,031 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45139 2024-11-19T04:54:58,032 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45139 2024-11-19T04:54:58,032 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45139 2024-11-19T04:54:58,034 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45139 2024-11-19T04:54:58,034 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45139 2024-11-19T04:54:58,059 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/08a7f35e60d4:0 server-side Connection retries=45 2024-11-19T04:54:58,059 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T04:54:58,059 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T04:54:58,059 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T04:54:58,059 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T04:54:58,059 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T04:54:58,059 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T04:54:58,060 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T04:54:58,061 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38579 2024-11-19T04:54:58,063 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38579 connecting to ZooKeeper ensemble=127.0.0.1:50716 2024-11-19T04:54:58,063 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:54:58,066 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:54:58,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:385790x0, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T04:54:58,071 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:385790x0, quorum=127.0.0.1:50716, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T04:54:58,071 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38579-0x1012e93ff3f0001 connected 2024-11-19T04:54:58,071 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T04:54:58,072 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T04:54:58,073 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38579-0x1012e93ff3f0001, quorum=127.0.0.1:50716, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T04:54:58,074 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38579-0x1012e93ff3f0001, quorum=127.0.0.1:50716, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T04:54:58,074 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38579 2024-11-19T04:54:58,074 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38579 2024-11-19T04:54:58,075 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38579 2024-11-19T04:54:58,076 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38579 2024-11-19T04:54:58,076 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38579 2024-11-19T04:54:58,087 
WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T04:54:58,096 DEBUG [M:0;08a7f35e60d4:45139 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;08a7f35e60d4:45139 2024-11-19T04:54:58,097 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/08a7f35e60d4,45139,1731992097985 2024-11-19T04:54:58,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38579-0x1012e93ff3f0001, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T04:54:58,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45139-0x1012e93ff3f0000, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T04:54:58,102 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45139-0x1012e93ff3f0000, quorum=127.0.0.1:50716, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/08a7f35e60d4,45139,1731992097985 2024-11-19T04:54:58,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45139-0x1012e93ff3f0000, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:54:58,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38579-0x1012e93ff3f0001, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T04:54:58,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38579-0x1012e93ff3f0001, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:54:58,105 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45139-0x1012e93ff3f0000, quorum=127.0.0.1:50716, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T04:54:58,109 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/08a7f35e60d4,45139,1731992097985 from backup master directory 2024-11-19T04:54:58,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38579-0x1012e93ff3f0001, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T04:54:58,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45139-0x1012e93ff3f0000, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/08a7f35e60d4,45139,1731992097985 2024-11-19T04:54:58,110 WARN [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
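By this point the master has bound its RPC server on 172.17.0.2:45139, the region server on 172.17.0.2:38579, and both hold sessions on the ZooKeeper ensemble at 127.0.0.1:50716. A hedged sketch of how a client would reach this cluster follows; in a test these settings normally come pre-populated from the testing utility's Configuration, so building them by hand here is only for illustration (the property names are standard HBase client keys, the values are the ones printed in the log).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MiniClusterClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.setInt("hbase.zookeeper.property.clientPort", 50716);

    // Locates the active master via ZooKeeper, much as the region server
    // above does when it sets a watcher on /hbase/master.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
    }
  }
}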
2024-11-19T04:54:58,110 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=08a7f35e60d4,45139,1731992097985 2024-11-19T04:54:58,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45139-0x1012e93ff3f0000, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T04:54:58,114 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:54:58,116 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/hbase.id] with ID: e0d915a1-8c88-4f97-a0ab-e29f3ab3b56a 2024-11-19T04:54:58,116 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/.tmp/hbase.id 2024-11-19T04:54:58,118 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:54:58,118 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:54:58,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43353 is added to blk_1073741826_1002 (size=42) 2024-11-19T04:54:58,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38875 is added to blk_1073741826_1002 (size=42) 2024-11-19T04:54:58,136 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/.tmp/hbase.id]:[hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/hbase.id] 2024-11-19T04:54:58,149 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:54:58,149 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-19T04:54:58,151 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-19T04:54:58,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38579-0x1012e93ff3f0001, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:54:58,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45139-0x1012e93ff3f0000, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:54:58,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43353 is added to blk_1073741827_1003 (size=196) 2024-11-19T04:54:58,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38875 is added to blk_1073741827_1003 (size=196) 2024-11-19T04:54:58,161 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T04:54:58,162 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T04:54:58,162 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T04:54:58,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43353 is added to blk_1073741828_1004 (size=1189) 2024-11-19T04:54:58,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38875 is added to blk_1073741828_1004 (size=1189) 2024-11-19T04:54:58,171 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/data/master/store 2024-11-19T04:54:58,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43353 is added to blk_1073741829_1005 (size=34) 2024-11-19T04:54:58,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38875 is added to blk_1073741829_1005 (size=34) 2024-11-19T04:54:58,178 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:54:58,179 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T04:54:58,179 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:54:58,179 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:54:58,179 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T04:54:58,179 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:54:58,179 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
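The long dump above ({TABLE_ATTRIBUTES => ...} followed by the info, proc, rs and state column families) is the toString of the TableDescriptor that MasterRegion uses for its local 'master:store' region. For orientation, here is a hedged sketch of how an equivalent descriptor is assembled with HBase's public builder API, using a hypothetical table name and reproducing only a few of the printed attributes.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  public static void main(String[] args) {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example"))                  // hypothetical name
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                      // VERSIONS => '3'
            .setInMemory(true)                                      // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                                 // BLOCKSIZE => '8192 B (8KB)'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)   // DATA_BLOCK_ENCODING
            .setBloomFilterType(BloomType.ROWCOL)                   // BLOOMFILTER => 'ROWCOL'
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))  // remaining families left at defaults
        .build();
    System.out.println(desc);
  }
}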
2024-11-19T04:54:58,179 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731992098179Disabling compacts and flushes for region at 1731992098179Disabling writes for close at 1731992098179Writing region close event to WAL at 1731992098179Closed at 1731992098179 2024-11-19T04:54:58,180 WARN [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/data/master/store/.initializing 2024-11-19T04:54:58,180 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/WALs/08a7f35e60d4,45139,1731992097985 2024-11-19T04:54:58,183 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=08a7f35e60d4%2C45139%2C1731992097985, suffix=, logDir=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/WALs/08a7f35e60d4,45139,1731992097985, archiveDir=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/oldWALs, maxLogs=10 2024-11-19T04:54:58,183 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C45139%2C1731992097985.1731992098183 2024-11-19T04:54:58,188 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/WALs/08a7f35e60d4,45139,1731992097985/08a7f35e60d4%2C45139%2C1731992097985.1731992098183 2024-11-19T04:54:58,189 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43029:43029),(127.0.0.1/127.0.0.1:43361:43361)] 2024-11-19T04:54:58,191 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T04:54:58,191 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:54:58,192 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:54:58,192 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:54:58,193 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:54:58,195 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T04:54:58,195 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:54:58,195 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:54:58,195 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:54:58,197 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T04:54:58,197 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:54:58,197 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T04:54:58,197 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:54:58,199 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T04:54:58,199 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:54:58,199 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T04:54:58,199 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:54:58,200 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T04:54:58,200 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:54:58,201 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T04:54:58,201 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:54:58,202 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:54:58,202 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:54:58,204 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:54:58,204 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:54:58,204 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T04:54:58,205 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:54:58,208 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T04:54:58,208 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=814199, jitterRate=0.03530842065811157}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T04:54:58,209 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731992098192Initializing all the Stores at 1731992098193 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992098193Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992098193Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992098193Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992098193Cleaning up temporary data from old regions at 1731992098204 (+11 ms)Region opened successfully at 1731992098209 (+5 ms) 2024-11-19T04:54:58,210 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T04:54:58,213 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3cd3ee1f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=08a7f35e60d4/172.17.0.2:0 2024-11-19T04:54:58,214 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T04:54:58,214 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T04:54:58,214 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T04:54:58,214 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T04:54:58,215 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-19T04:54:58,215 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-19T04:54:58,215 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T04:54:58,218 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T04:54:58,219 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45139-0x1012e93ff3f0000, quorum=127.0.0.1:50716, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T04:54:58,221 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T04:54:58,222 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T04:54:58,222 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45139-0x1012e93ff3f0000, quorum=127.0.0.1:50716, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T04:54:58,224 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T04:54:58,224 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T04:54:58,225 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45139-0x1012e93ff3f0000, quorum=127.0.0.1:50716, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T04:54:58,226 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T04:54:58,227 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45139-0x1012e93ff3f0000, quorum=127.0.0.1:50716, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T04:54:58,228 DEBUG 
[master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T04:54:58,230 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45139-0x1012e93ff3f0000, quorum=127.0.0.1:50716, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T04:54:58,232 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T04:54:58,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45139-0x1012e93ff3f0000, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T04:54:58,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38579-0x1012e93ff3f0001, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T04:54:58,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45139-0x1012e93ff3f0000, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:54:58,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38579-0x1012e93ff3f0001, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:54:58,234 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=08a7f35e60d4,45139,1731992097985, sessionid=0x1012e93ff3f0000, setting cluster-up flag (Was=false) 2024-11-19T04:54:58,237 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38579-0x1012e93ff3f0001, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:54:58,237 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45139-0x1012e93ff3f0000, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:54:58,248 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T04:54:58,249 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=08a7f35e60d4,45139,1731992097985 2024-11-19T04:54:58,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45139-0x1012e93ff3f0000, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:54:58,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38579-0x1012e93ff3f0001, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:54:58,259 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T04:54:58,260 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=08a7f35e60d4,45139,1731992097985 2024-11-19T04:54:58,262 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T04:54:58,264 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T04:54:58,265 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T04:54:58,265 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-19T04:54:58,265 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 08a7f35e60d4,45139,1731992097985 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T04:54:58,267 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/08a7f35e60d4:0, corePoolSize=5, maxPoolSize=5 2024-11-19T04:54:58,267 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/08a7f35e60d4:0, corePoolSize=5, maxPoolSize=5 2024-11-19T04:54:58,267 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=5, maxPoolSize=5 2024-11-19T04:54:58,267 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=5, maxPoolSize=5 2024-11-19T04:54:58,267 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/08a7f35e60d4:0, corePoolSize=10, maxPoolSize=10 2024-11-19T04:54:58,267 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:58,267 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=2, maxPoolSize=2 2024-11-19T04:54:58,267 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=1, 
maxPoolSize=1 2024-11-19T04:54:58,268 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731992128268 2024-11-19T04:54:58,268 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T04:54:58,269 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T04:54:58,269 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T04:54:58,269 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T04:54:58,269 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T04:54:58,269 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T04:54:58,269 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:58,269 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T04:54:58,269 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T04:54:58,269 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T04:54:58,269 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T04:54:58,269 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T04:54:58,270 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T04:54:58,270 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T04:54:58,271 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:54:58,271 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T04:54:58,276 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.large.0-1731992098270,5,FailOnTimeoutGroup] 2024-11-19T04:54:58,276 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.small.0-1731992098276,5,FailOnTimeoutGroup] 2024-11-19T04:54:58,277 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:58,277 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T04:54:58,277 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:58,277 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
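The hbase:meta descriptor written above is built internally by InitMetaProcedure/FSTableDescriptors, but the same family attributes (VERSIONS, IN_MEMORY, BLOOMFILTER, DATA_BLOCK_ENCODING, BLOCKSIZE) can be expressed with the public client API. A minimal illustrative sketch of one such family, not the code path the master actually runs; the table name is hypothetical:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
      public static TableDescriptor build() throws IOException {
        // Mirrors the 'info' family as logged: VERSIONS=3, IN_MEMORY=true,
        // BLOOMFILTER=ROWCOL, DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOCKSIZE=8 KB.
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo", "meta_like"))  // hypothetical name
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(8 * 1024)
                .build())
            .build();
      }
    }

The ns, rep_barrier and table families from the log follow the same pattern and differ only in VERSIONS and BLOCKSIZE.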
2024-11-19T04:54:58,280 INFO [RS:0;08a7f35e60d4:38579 {}] regionserver.HRegionServer(746): ClusterId : e0d915a1-8c88-4f97-a0ab-e29f3ab3b56a 2024-11-19T04:54:58,280 DEBUG [RS:0;08a7f35e60d4:38579 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T04:54:58,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38875 is added to blk_1073741831_1007 (size=1321) 2024-11-19T04:54:58,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43353 is added to blk_1073741831_1007 (size=1321) 2024-11-19T04:54:58,287 DEBUG [RS:0;08a7f35e60d4:38579 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T04:54:58,287 DEBUG [RS:0;08a7f35e60d4:38579 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T04:54:58,288 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T04:54:58,288 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7 2024-11-19T04:54:58,290 DEBUG [RS:0;08a7f35e60d4:38579 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T04:54:58,290 DEBUG [RS:0;08a7f35e60d4:38579 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59702f8c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=08a7f35e60d4/172.17.0.2:0 2024-11-19T04:54:58,297 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38875 is added to blk_1073741832_1008 (size=32) 2024-11-19T04:54:58,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43353 is added to blk_1073741832_1008 (size=32) 2024-11-19T04:54:58,298 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:54:58,299 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T04:54:58,301 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T04:54:58,301 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:54:58,301 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:54:58,302 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T04:54:58,303 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T04:54:58,303 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:54:58,304 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:54:58,304 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T04:54:58,305 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T04:54:58,305 DEBUG [RS:0;08a7f35e60d4:38579 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;08a7f35e60d4:38579 2024-11-19T04:54:58,305 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:54:58,305 INFO [RS:0;08a7f35e60d4:38579 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T04:54:58,305 INFO [RS:0;08a7f35e60d4:38579 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T04:54:58,305 DEBUG [RS:0;08a7f35e60d4:38579 {}] regionserver.HRegionServer(832): About to register with Master. 
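The CompactionConfiguration lines repeated for every store are just the effective settings echoed at store-open time. Assuming the stock hbase-site.xml property names (worth verifying against the exact HBase version in use), the same values could be set explicitly; the logged throttle point 2684354560 appears to be 2 x maxFilesToCompact x the 128 MB memstore flush size, its usual fallback, rather than an independently-set knob:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static Configuration tunedCompactionConf() {
        Configuration conf = HBaseConfiguration.create();
        // Values mirror what CompactionConfiguration(183) printed above.
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
        conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);          // major period, 7 days
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        return conf;
      }
    }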
2024-11-19T04:54:58,306 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:54:58,306 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T04:54:58,306 INFO [RS:0;08a7f35e60d4:38579 {}] regionserver.HRegionServer(2659): reportForDuty to master=08a7f35e60d4,45139,1731992097985 with port=38579, startcode=1731992098058 2024-11-19T04:54:58,306 DEBUG [RS:0;08a7f35e60d4:38579 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T04:54:58,307 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T04:54:58,307 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:54:58,308 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:54:58,308 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T04:54:58,309 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/hbase/meta/1588230740 2024-11-19T04:54:58,309 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/hbase/meta/1588230740 2024-11-19T04:54:58,310 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T04:54:58,310 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T04:54:58,311 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
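The per-family flush lower bound reported just above (and earlier as 32.0 M for master:store) is simply the region's memstore flush heap size divided by its family count, used whenever hbase.hregion.percolumnfamilyflush.size.lower.bound is absent from the table descriptor. The arithmetic behind the two values seen in this run, as a small illustrative sketch; the 64 MB figure for hbase:meta is inferred from 16 MB x 4 families, not logged directly:

    public class FlushLowerBoundSketch {
      // Fallback when hbase.hregion.percolumnfamilyflush.size.lower.bound is unset:
      // memstore flush heap size / number of column families.
      static long fallbackLowerBound(long memstoreFlushHeapSize, int familyCount) {
        return memstoreFlushHeapSize / familyCount;
      }

      public static void main(String[] args) {
        // master:store (info/proc/rs/state): 128 MB / 4 = 33554432 (32.0 M)
        System.out.println(fallbackLowerBound(134_217_728L, 4));
        // hbase:meta (info/ns/rep_barrier/table): 64 MB / 4 = 16777216 (16.0 M)
        System.out.println(fallbackLowerBound(67_108_864L, 4));
      }
    }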
2024-11-19T04:54:58,312 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T04:54:58,314 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52007, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T04:54:58,314 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45139 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 08a7f35e60d4,38579,1731992098058 2024-11-19T04:54:58,314 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45139 {}] master.ServerManager(517): Registering regionserver=08a7f35e60d4,38579,1731992098058 2024-11-19T04:54:58,315 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T04:54:58,316 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=773633, jitterRate=-0.016275644302368164}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T04:54:58,316 DEBUG [RS:0;08a7f35e60d4:38579 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7 2024-11-19T04:54:58,316 DEBUG [RS:0;08a7f35e60d4:38579 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41423 2024-11-19T04:54:58,316 DEBUG [RS:0;08a7f35e60d4:38579 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T04:54:58,316 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731992098298Initializing all the Stores at 1731992098299 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992098299Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992098299Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992098299Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992098299Cleaning up 
temporary data from old regions at 1731992098310 (+11 ms)Region opened successfully at 1731992098316 (+6 ms) 2024-11-19T04:54:58,317 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T04:54:58,317 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T04:54:58,317 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T04:54:58,317 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T04:54:58,317 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T04:54:58,317 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T04:54:58,317 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731992098317Disabling compacts and flushes for region at 1731992098317Disabling writes for close at 1731992098317Writing region close event to WAL at 1731992098317Closed at 1731992098317 2024-11-19T04:54:58,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45139-0x1012e93ff3f0000, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T04:54:58,319 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T04:54:58,319 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T04:54:58,319 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T04:54:58,319 DEBUG [RS:0;08a7f35e60d4:38579 {}] zookeeper.ZKUtil(111): regionserver:38579-0x1012e93ff3f0001, quorum=127.0.0.1:50716, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/08a7f35e60d4,38579,1731992098058 2024-11-19T04:54:58,319 WARN [RS:0;08a7f35e60d4:38579 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-19T04:54:58,319 INFO [RS:0;08a7f35e60d4:38579 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T04:54:58,319 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [08a7f35e60d4,38579,1731992098058] 2024-11-19T04:54:58,320 DEBUG [RS:0;08a7f35e60d4:38579 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058 2024-11-19T04:54:58,321 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T04:54:58,322 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T04:54:58,324 INFO [RS:0;08a7f35e60d4:38579 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T04:54:58,327 INFO [RS:0;08a7f35e60d4:38579 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T04:54:58,328 INFO [RS:0;08a7f35e60d4:38579 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T04:54:58,328 INFO [RS:0;08a7f35e60d4:38579 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:58,328 INFO [RS:0;08a7f35e60d4:38579 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T04:54:58,329 INFO [RS:0;08a7f35e60d4:38579 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T04:54:58,330 INFO [RS:0;08a7f35e60d4:38579 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
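The MemStoreFlusher line above reports an 880 M global memstore limit with an 836 M low-water mark, i.e. 95% of the limit. Those two numbers normally come from heap-fraction keys rather than absolute sizes; a hedged sketch, assuming the stock property names:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
      public static Configuration memstoreLimits() {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of the region server heap shared by all memstores
        // (resolves to the 880 M globalMemStoreLimit in this run's heap).
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Low-water mark as a fraction of that limit: 0.95 * 880 M ~= 836 M,
        // matching the logged globalMemStoreLimitLowMark.
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        return conf;
      }
    }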
2024-11-19T04:54:58,330 DEBUG [RS:0;08a7f35e60d4:38579 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:58,330 DEBUG [RS:0;08a7f35e60d4:38579 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:58,330 DEBUG [RS:0;08a7f35e60d4:38579 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:58,330 DEBUG [RS:0;08a7f35e60d4:38579 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:58,330 DEBUG [RS:0;08a7f35e60d4:38579 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:58,330 DEBUG [RS:0;08a7f35e60d4:38579 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/08a7f35e60d4:0, corePoolSize=2, maxPoolSize=2 2024-11-19T04:54:58,330 DEBUG [RS:0;08a7f35e60d4:38579 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:58,330 DEBUG [RS:0;08a7f35e60d4:38579 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:58,330 DEBUG [RS:0;08a7f35e60d4:38579 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:58,330 DEBUG [RS:0;08a7f35e60d4:38579 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:58,330 DEBUG [RS:0;08a7f35e60d4:38579 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:58,331 DEBUG [RS:0;08a7f35e60d4:38579 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:58,331 DEBUG [RS:0;08a7f35e60d4:38579 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/08a7f35e60d4:0, corePoolSize=3, maxPoolSize=3 2024-11-19T04:54:58,331 DEBUG [RS:0;08a7f35e60d4:38579 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0, corePoolSize=3, maxPoolSize=3 2024-11-19T04:54:58,332 INFO [RS:0;08a7f35e60d4:38579 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:58,332 INFO [RS:0;08a7f35e60d4:38579 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:58,332 INFO [RS:0;08a7f35e60d4:38579 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:58,332 INFO [RS:0;08a7f35e60d4:38579 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-19T04:54:58,332 INFO [RS:0;08a7f35e60d4:38579 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:58,332 INFO [RS:0;08a7f35e60d4:38579 {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,38579,1731992098058-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T04:54:58,349 INFO [RS:0;08a7f35e60d4:38579 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T04:54:58,349 INFO [RS:0;08a7f35e60d4:38579 {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,38579,1731992098058-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:58,349 INFO [RS:0;08a7f35e60d4:38579 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:58,349 INFO [RS:0;08a7f35e60d4:38579 {}] regionserver.Replication(171): 08a7f35e60d4,38579,1731992098058 started 2024-11-19T04:54:58,365 INFO [RS:0;08a7f35e60d4:38579 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:58,365 INFO [RS:0;08a7f35e60d4:38579 {}] regionserver.HRegionServer(1482): Serving as 08a7f35e60d4,38579,1731992098058, RpcServer on 08a7f35e60d4/172.17.0.2:38579, sessionid=0x1012e93ff3f0001 2024-11-19T04:54:58,365 DEBUG [RS:0;08a7f35e60d4:38579 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T04:54:58,365 DEBUG [RS:0;08a7f35e60d4:38579 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 08a7f35e60d4,38579,1731992098058 2024-11-19T04:54:58,365 DEBUG [RS:0;08a7f35e60d4:38579 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '08a7f35e60d4,38579,1731992098058' 2024-11-19T04:54:58,365 DEBUG [RS:0;08a7f35e60d4:38579 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T04:54:58,366 DEBUG [RS:0;08a7f35e60d4:38579 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T04:54:58,367 DEBUG [RS:0;08a7f35e60d4:38579 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T04:54:58,367 DEBUG [RS:0;08a7f35e60d4:38579 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T04:54:58,367 DEBUG [RS:0;08a7f35e60d4:38579 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 08a7f35e60d4,38579,1731992098058 2024-11-19T04:54:58,367 DEBUG [RS:0;08a7f35e60d4:38579 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '08a7f35e60d4,38579,1731992098058' 2024-11-19T04:54:58,367 DEBUG [RS:0;08a7f35e60d4:38579 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T04:54:58,367 DEBUG [RS:0;08a7f35e60d4:38579 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T04:54:58,368 DEBUG [RS:0;08a7f35e60d4:38579 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T04:54:58,368 INFO [RS:0;08a7f35e60d4:38579 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T04:54:58,368 INFO [RS:0;08a7f35e60d4:38579 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-19T04:54:58,470 INFO [RS:0;08a7f35e60d4:38579 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=08a7f35e60d4%2C38579%2C1731992098058, suffix=, logDir=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058, archiveDir=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/oldWALs, maxLogs=32 2024-11-19T04:54:58,471 INFO [RS:0;08a7f35e60d4:38579 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C38579%2C1731992098058.1731992098471 2024-11-19T04:54:58,472 WARN [08a7f35e60d4:45139 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-19T04:54:58,482 INFO [RS:0;08a7f35e60d4:38579 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992098471 2024-11-19T04:54:58,491 DEBUG [RS:0;08a7f35e60d4:38579 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43029:43029),(127.0.0.1/127.0.0.1:43361:43361)] 2024-11-19T04:54:58,723 DEBUG [08a7f35e60d4:45139 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T04:54:58,723 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=08a7f35e60d4,38579,1731992098058 2024-11-19T04:54:58,725 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 08a7f35e60d4,38579,1731992098058, state=OPENING 2024-11-19T04:54:58,727 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T04:54:58,731 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45139-0x1012e93ff3f0000, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:54:58,731 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38579-0x1012e93ff3f0001, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:54:58,732 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T04:54:58,732 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T04:54:58,732 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T04:54:58,732 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=08a7f35e60d4,38579,1731992098058}] 2024-11-19T04:54:58,886 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T04:54:58,888 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41207, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T04:54:58,892 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T04:54:58,892 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T04:54:58,894 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=08a7f35e60d4%2C38579%2C1731992098058.meta, suffix=.meta, logDir=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058, archiveDir=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/oldWALs, maxLogs=32 2024-11-19T04:54:58,895 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta 2024-11-19T04:54:58,900 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta 2024-11-19T04:54:58,901 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43361:43361),(127.0.0.1/127.0.0.1:43029:43029)] 2024-11-19T04:54:58,902 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T04:54:58,902 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T04:54:58,903 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T04:54:58,903 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
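The meta WAL created above echoes the same knobs as the region server WAL earlier: blocksize=256 MB, rollsize=128 MB, maxLogs=32, with FSHLogProvider as the provider. A hedged sketch of the keys these values usually map to, assuming the stock property names; the roll size is derived as blocksize x roll multiplier rather than set directly:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
      public static Configuration walSettings() {
        Configuration conf = HBaseConfiguration.create();
        // WAL provider; FSHLogProvider corresponds to the classic filesystem WAL.
        conf.set("hbase.wal.provider", "filesystem");
        // WAL block size (256 MB as logged).
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        // Roll threshold as a fraction of the block size: 256 MB * 0.5 = 128 MB.
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // Cap on un-archived WAL files per server (maxLogs=32 above).
        conf.setInt("hbase.regionserver.maxlogs", 32);
        return conf;
      }
    }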
2024-11-19T04:54:58,903 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T04:54:58,903 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:54:58,903 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T04:54:58,903 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T04:54:58,904 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T04:54:58,905 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T04:54:58,905 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:54:58,906 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:54:58,906 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T04:54:58,907 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T04:54:58,907 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:54:58,907 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:54:58,907 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T04:54:58,908 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T04:54:58,908 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:54:58,909 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:54:58,909 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T04:54:58,909 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T04:54:58,910 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:54:58,910 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-19T04:54:58,910 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T04:54:58,911 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/hbase/meta/1588230740 2024-11-19T04:54:58,912 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/hbase/meta/1588230740 2024-11-19T04:54:58,914 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T04:54:58,914 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T04:54:58,914 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T04:54:58,916 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T04:54:58,917 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=703184, jitterRate=-0.10585539042949677}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T04:54:58,917 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T04:54:58,918 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731992098903Writing region info on filesystem at 1731992098903Initializing all the Stores at 1731992098904 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992098904Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992098904Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992098904Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992098904Cleaning up temporary data from old regions at 1731992098914 (+10 ms)Running coprocessor post-open hooks at 1731992098917 (+3 ms)Region opened successfully at 1731992098918 (+1 ms) 2024-11-19T04:54:58,919 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731992098886 2024-11-19T04:54:58,922 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T04:54:58,922 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T04:54:58,923 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=08a7f35e60d4,38579,1731992098058 2024-11-19T04:54:58,924 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 08a7f35e60d4,38579,1731992098058, state=OPEN 2024-11-19T04:54:58,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38579-0x1012e93ff3f0001, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T04:54:58,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45139-0x1012e93ff3f0000, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T04:54:58,930 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=08a7f35e60d4,38579,1731992098058 2024-11-19T04:54:58,930 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T04:54:58,930 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T04:54:58,933 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T04:54:58,934 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=08a7f35e60d4,38579,1731992098058 in 198 msec 2024-11-19T04:54:58,938 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T04:54:58,938 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 615 msec 2024-11-19T04:54:58,939 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T04:54:58,939 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T04:54:58,940 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T04:54:58,940 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=08a7f35e60d4,38579,1731992098058, seqNum=-1] 2024-11-19T04:54:58,941 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T04:54:58,942 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60575, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T04:54:58,949 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 683 msec 2024-11-19T04:54:58,949 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731992098949, completionTime=-1 2024-11-19T04:54:58,949 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T04:54:58,949 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-19T04:54:58,951 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-19T04:54:58,951 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731992158951 2024-11-19T04:54:58,951 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731992218951 2024-11-19T04:54:58,951 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-19T04:54:58,951 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,45139,1731992097985-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:58,951 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,45139,1731992097985-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:58,951 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,45139,1731992097985-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:58,952 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-08a7f35e60d4:45139, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T04:54:58,952 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:58,952 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:58,954 DEBUG [master/08a7f35e60d4:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T04:54:58,955 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.845sec 2024-11-19T04:54:58,956 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T04:54:58,956 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T04:54:58,956 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T04:54:58,956 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-19T04:54:58,956 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T04:54:58,956 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,45139,1731992097985-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T04:54:58,956 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,45139,1731992097985-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T04:54:58,959 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T04:54:58,959 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T04:54:58,959 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,45139,1731992097985-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T04:54:58,980 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@597807df, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T04:54:58,980 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 08a7f35e60d4,45139,-1 for getting cluster id 2024-11-19T04:54:58,980 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T04:54:58,982 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e0d915a1-8c88-4f97-a0ab-e29f3ab3b56a' 2024-11-19T04:54:58,983 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T04:54:58,983 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e0d915a1-8c88-4f97-a0ab-e29f3ab3b56a" 2024-11-19T04:54:58,984 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d958443, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T04:54:58,984 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [08a7f35e60d4,45139,-1] 2024-11-19T04:54:58,984 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T04:54:58,985 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:54:58,986 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57000, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T04:54:58,987 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3efce601, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T04:54:58,988 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T04:54:58,989 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=08a7f35e60d4,38579,1731992098058, seqNum=-1] 2024-11-19T04:54:58,990 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T04:54:58,991 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39700, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T04:54:58,993 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=08a7f35e60d4,45139,1731992097985 2024-11-19T04:54:58,993 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:54:58,998 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T04:54:59,052 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/08a7f35e60d4:0 server-side Connection retries=45 2024-11-19T04:54:59,052 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T04:54:59,052 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T04:54:59,052 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T04:54:59,052 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T04:54:59,052 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T04:54:59,052 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T04:54:59,052 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T04:54:59,053 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42287 2024-11-19T04:54:59,055 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42287 connecting to ZooKeeper ensemble=127.0.0.1:50716 2024-11-19T04:54:59,055 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:54:59,057 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:54:59,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:422870x0, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T04:54:59,061 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42287-0x1012e93ff3f0002 connected 2024-11-19T04:54:59,061 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:42287-0x1012e93ff3f0002, quorum=127.0.0.1:50716, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-19T04:54:59,062 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-19T04:54:59,062 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T04:54:59,064 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 
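Note: the entries above show the test reporting the mini cluster as up, switching the balancer off, and starting a second region server (RS:1 on port 42287). The TestLogRolling source itself is not part of this log, so the following is only a rough sketch of how a test typically drives these steps, written against the long-established HBaseTestingUtility/MiniHBaseCluster API; the newer HBaseTestingUtil seen in this run may name the equivalent calls slightly differently.

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.MiniHBaseCluster;
    import org.apache.hadoop.hbase.client.Admin;

    public class SecondRegionServerSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        util.startMiniCluster(1);                 // master plus one region server, as at the start of this run

        try (Admin admin = util.getConnection().getAdmin()) {
          admin.balancerSwitch(false, true);      // corresponds to "set balanceSwitch=false" above
        }

        MiniHBaseCluster cluster = util.getMiniHBaseCluster();
        cluster.startRegionServer();              // brings up a second RS, like RS:1 on port 42287 above

        util.shutdownMiniCluster();
      }
    }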
2024-11-19T04:54:59,065 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:42287-0x1012e93ff3f0002, quorum=127.0.0.1:50716, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T04:54:59,066 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42287-0x1012e93ff3f0002, quorum=127.0.0.1:50716, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T04:54:59,068 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42287 2024-11-19T04:54:59,068 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42287 2024-11-19T04:54:59,068 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42287 2024-11-19T04:54:59,070 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42287 2024-11-19T04:54:59,071 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42287 2024-11-19T04:54:59,073 INFO [RS:1;08a7f35e60d4:42287 {}] regionserver.HRegionServer(746): ClusterId : e0d915a1-8c88-4f97-a0ab-e29f3ab3b56a 2024-11-19T04:54:59,073 DEBUG [RS:1;08a7f35e60d4:42287 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T04:54:59,076 DEBUG [RS:1;08a7f35e60d4:42287 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T04:54:59,077 DEBUG [RS:1;08a7f35e60d4:42287 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T04:54:59,078 DEBUG [RS:1;08a7f35e60d4:42287 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T04:54:59,079 DEBUG [RS:1;08a7f35e60d4:42287 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49d14175, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=08a7f35e60d4/172.17.0.2:0 2024-11-19T04:54:59,098 DEBUG [RS:1;08a7f35e60d4:42287 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;08a7f35e60d4:42287 2024-11-19T04:54:59,098 INFO [RS:1;08a7f35e60d4:42287 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T04:54:59,098 INFO [RS:1;08a7f35e60d4:42287 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T04:54:59,098 DEBUG [RS:1;08a7f35e60d4:42287 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-19T04:54:59,099 INFO [RS:1;08a7f35e60d4:42287 {}] regionserver.HRegionServer(2659): reportForDuty to master=08a7f35e60d4,45139,1731992097985 with port=42287, startcode=1731992099051 2024-11-19T04:54:59,099 DEBUG [RS:1;08a7f35e60d4:42287 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T04:54:59,101 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53947, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T04:54:59,102 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45139 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 08a7f35e60d4,42287,1731992099051 2024-11-19T04:54:59,102 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45139 {}] master.ServerManager(517): Registering regionserver=08a7f35e60d4,42287,1731992099051 2024-11-19T04:54:59,104 DEBUG [RS:1;08a7f35e60d4:42287 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7 2024-11-19T04:54:59,104 DEBUG [RS:1;08a7f35e60d4:42287 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41423 2024-11-19T04:54:59,104 DEBUG [RS:1;08a7f35e60d4:42287 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T04:54:59,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45139-0x1012e93ff3f0000, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T04:54:59,106 DEBUG [RS:1;08a7f35e60d4:42287 {}] zookeeper.ZKUtil(111): regionserver:42287-0x1012e93ff3f0002, quorum=127.0.0.1:50716, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/08a7f35e60d4,42287,1731992099051 2024-11-19T04:54:59,106 WARN [RS:1;08a7f35e60d4:42287 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T04:54:59,106 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [08a7f35e60d4,42287,1731992099051] 2024-11-19T04:54:59,106 INFO [RS:1;08a7f35e60d4:42287 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T04:54:59,106 DEBUG [RS:1;08a7f35e60d4:42287 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051 2024-11-19T04:54:59,110 INFO [RS:1;08a7f35e60d4:42287 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T04:54:59,112 INFO [RS:1;08a7f35e60d4:42287 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T04:54:59,112 INFO [RS:1;08a7f35e60d4:42287 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T04:54:59,113 INFO [RS:1;08a7f35e60d4:42287 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-19T04:54:59,113 INFO [RS:1;08a7f35e60d4:42287 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T04:54:59,114 INFO [RS:1;08a7f35e60d4:42287 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T04:54:59,114 INFO [RS:1;08a7f35e60d4:42287 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:59,114 DEBUG [RS:1;08a7f35e60d4:42287 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:59,114 DEBUG [RS:1;08a7f35e60d4:42287 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:59,114 DEBUG [RS:1;08a7f35e60d4:42287 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:59,114 DEBUG [RS:1;08a7f35e60d4:42287 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:59,114 DEBUG [RS:1;08a7f35e60d4:42287 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:59,114 DEBUG [RS:1;08a7f35e60d4:42287 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/08a7f35e60d4:0, corePoolSize=2, maxPoolSize=2 2024-11-19T04:54:59,114 DEBUG [RS:1;08a7f35e60d4:42287 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:59,114 DEBUG [RS:1;08a7f35e60d4:42287 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:59,114 DEBUG [RS:1;08a7f35e60d4:42287 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:59,114 DEBUG [RS:1;08a7f35e60d4:42287 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:59,115 DEBUG [RS:1;08a7f35e60d4:42287 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:59,115 DEBUG [RS:1;08a7f35e60d4:42287 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:54:59,115 DEBUG [RS:1;08a7f35e60d4:42287 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/08a7f35e60d4:0, corePoolSize=3, maxPoolSize=3 2024-11-19T04:54:59,115 DEBUG [RS:1;08a7f35e60d4:42287 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0, corePoolSize=3, maxPoolSize=3 2024-11-19T04:54:59,116 INFO [RS:1;08a7f35e60d4:42287 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-19T04:54:59,116 INFO [RS:1;08a7f35e60d4:42287 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:59,116 INFO [RS:1;08a7f35e60d4:42287 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:59,116 INFO [RS:1;08a7f35e60d4:42287 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:59,116 INFO [RS:1;08a7f35e60d4:42287 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:59,116 INFO [RS:1;08a7f35e60d4:42287 {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,42287,1731992099051-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T04:54:59,136 INFO [RS:1;08a7f35e60d4:42287 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T04:54:59,136 INFO [RS:1;08a7f35e60d4:42287 {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,42287,1731992099051-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:59,137 INFO [RS:1;08a7f35e60d4:42287 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:59,137 INFO [RS:1;08a7f35e60d4:42287 {}] regionserver.Replication(171): 08a7f35e60d4,42287,1731992099051 started 2024-11-19T04:54:59,154 INFO [RS:1;08a7f35e60d4:42287 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:54:59,154 INFO [RS:1;08a7f35e60d4:42287 {}] regionserver.HRegionServer(1482): Serving as 08a7f35e60d4,42287,1731992099051, RpcServer on 08a7f35e60d4/172.17.0.2:42287, sessionid=0x1012e93ff3f0002 2024-11-19T04:54:59,154 DEBUG [RS:1;08a7f35e60d4:42287 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T04:54:59,155 DEBUG [RS:1;08a7f35e60d4:42287 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 08a7f35e60d4,42287,1731992099051 2024-11-19T04:54:59,155 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;08a7f35e60d4:42287,5,FailOnTimeoutGroup] 2024-11-19T04:54:59,155 DEBUG [RS:1;08a7f35e60d4:42287 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '08a7f35e60d4,42287,1731992099051' 2024-11-19T04:54:59,155 DEBUG [RS:1;08a7f35e60d4:42287 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T04:54:59,155 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-19T04:54:59,155 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-19T04:54:59,155 DEBUG [RS:1;08a7f35e60d4:42287 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T04:54:59,156 DEBUG [RS:1;08a7f35e60d4:42287 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T04:54:59,156 DEBUG [RS:1;08a7f35e60d4:42287 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T04:54:59,156 DEBUG [RS:1;08a7f35e60d4:42287 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
08a7f35e60d4,42287,1731992099051 2024-11-19T04:54:59,156 DEBUG [RS:1;08a7f35e60d4:42287 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '08a7f35e60d4,42287,1731992099051' 2024-11-19T04:54:59,156 DEBUG [RS:1;08a7f35e60d4:42287 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T04:54:59,157 DEBUG [RS:1;08a7f35e60d4:42287 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T04:54:59,157 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 08a7f35e60d4,45139,1731992097985 2024-11-19T04:54:59,157 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3b0bde79 2024-11-19T04:54:59,157 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-19T04:54:59,157 DEBUG [RS:1;08a7f35e60d4:42287 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T04:54:59,157 INFO [RS:1;08a7f35e60d4:42287 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T04:54:59,157 INFO [RS:1;08a7f35e60d4:42287 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T04:54:59,159 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57014, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-19T04:54:59,159 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45139 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-19T04:54:59,160 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45139 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
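The two warnings above come from the master's TableDescriptorChecker flagging deliberately tiny values: 786432 bytes (768 KB) for the maximum store file size and 8192 bytes for the memstore flush size, which force frequent flushes and splits during the test. The actual test code is not shown in this log, but such values are normally injected through the configuration keys named in the warnings before the cluster starts; a minimal sketch, assuming that route, together with the 'info'-only descriptor used in the create request that follows:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SmallFlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // In a test these would be set before the mini cluster starts, so every server sees them.
        conf.setLong("hbase.hregion.max.filesize", 768L * 1024L);   // 786432, the value in the first warning
        conf.setLong("hbase.hregion.memstore.flush.size", 8192L);   // the value in the second warning

        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(1)                                  // VERSIONS => '1' in the logged descriptor
                .build())
            .build();

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.createTable(desc);                                  // produces the HMaster "create" entry below
        }
      }
    }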
2024-11-19T04:54:59,160 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45139 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T04:54:59,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45139 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-19T04:54:59,163 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T04:54:59,163 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:54:59,163 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45139 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-19T04:54:59,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45139 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T04:54:59,164 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T04:54:59,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43353 is added to blk_1073741835_1011 (size=393) 2024-11-19T04:54:59,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38875 is added to blk_1073741835_1011 (size=393) 2024-11-19T04:54:59,173 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => f6a2e2ef47bf948c5ce3e0e7a516d28c, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7 2024-11-19T04:54:59,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43353 is added to blk_1073741836_1012 (size=76) 2024-11-19T04:54:59,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38875 is added to blk_1073741836_1012 (size=76) 2024-11-19T04:54:59,180 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:54:59,180 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing f6a2e2ef47bf948c5ce3e0e7a516d28c, disabling compactions & flushes 2024-11-19T04:54:59,180 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c. 2024-11-19T04:54:59,180 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c. 2024-11-19T04:54:59,180 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c. after waiting 0 ms 2024-11-19T04:54:59,180 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c. 2024-11-19T04:54:59,180 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c. 2024-11-19T04:54:59,181 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for f6a2e2ef47bf948c5ce3e0e7a516d28c: Waiting for close lock at 1731992099180Disabling compacts and flushes for region at 1731992099180Disabling writes for close at 1731992099180Writing region close event to WAL at 1731992099180Closed at 1731992099180 2024-11-19T04:54:59,182 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T04:54:59,183 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731992099182"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731992099182"}]},"ts":"1731992099182"} 2024-11-19T04:54:59,185 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
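For reference, the Put logged above writes the new region's info:regioninfo and info:state cells into hbase:meta. Purely as an illustrative sketch (not part of the test), the same row can be read back through the ordinary client API:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ReadMetaRowSketch {
      public static void main(String[] args) throws Exception {
        // Row key exactly as logged above: <table>,<startKey>,<timestamp>.<encoded region name>.
        byte[] row = Bytes.toBytes(
            "TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c.");

        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
          Result r = meta.get(new Get(row));
          byte[] state = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("state"));
          System.out.println("region state = " + Bytes.toString(state)); // e.g. OPENING, then OPEN
        }
      }
    }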
2024-11-19T04:54:59,186 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T04:54:59,186 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731992099186"}]},"ts":"1731992099186"} 2024-11-19T04:54:59,188 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-19T04:54:59,189 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=f6a2e2ef47bf948c5ce3e0e7a516d28c, ASSIGN}] 2024-11-19T04:54:59,190 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=f6a2e2ef47bf948c5ce3e0e7a516d28c, ASSIGN 2024-11-19T04:54:59,191 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=f6a2e2ef47bf948c5ce3e0e7a516d28c, ASSIGN; state=OFFLINE, location=08a7f35e60d4,38579,1731992098058; forceNewPlan=false, retain=false 2024-11-19T04:54:59,260 INFO [RS:1;08a7f35e60d4:42287 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=08a7f35e60d4%2C42287%2C1731992099051, suffix=, logDir=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051, archiveDir=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/oldWALs, maxLogs=32 2024-11-19T04:54:59,261 INFO [RS:1;08a7f35e60d4:42287 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C42287%2C1731992099051.1731992099260 2024-11-19T04:54:59,267 INFO [RS:1;08a7f35e60d4:42287 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 2024-11-19T04:54:59,267 DEBUG [RS:1;08a7f35e60d4:42287 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43361:43361),(127.0.0.1/127.0.0.1:43029:43029)] 2024-11-19T04:54:59,342 INFO [08a7f35e60d4:45139 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
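The WAL parameters logged above for the new region server (blocksize=256 MB, rollsize=128 MB, maxLogs=32) follow the usual relation: the roll size is the WAL block size multiplied by hbase.regionserver.logroll.multiplier (0.5 by default), and the block size defaults to twice the filesystem block size unless overridden. A hedged sketch of the standard configuration keys involved; whether this test overrides any of them is not visible from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // WAL block size; the log above shows 256 MB (typically twice the HDFS block size).
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        // Roll when the WAL reaches blocksize * multiplier: 256 MB * 0.5 = 128 MB, as logged.
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // Upper bound on the number of WAL files before forced flushes; maxLogs=32 above.
        conf.setInt("hbase.regionserver.maxlogs", 32);

        long blocksize = conf.getLong("hbase.regionserver.hlog.blocksize", 0L);
        float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        System.out.println("roll size = " + (long) (blocksize * multiplier) + " bytes");
      }
    }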
2024-11-19T04:54:59,343 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=f6a2e2ef47bf948c5ce3e0e7a516d28c, regionState=OPENING, regionLocation=08a7f35e60d4,38579,1731992098058 2024-11-19T04:54:59,346 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=f6a2e2ef47bf948c5ce3e0e7a516d28c, ASSIGN because future has completed 2024-11-19T04:54:59,346 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure f6a2e2ef47bf948c5ce3e0e7a516d28c, server=08a7f35e60d4,38579,1731992098058}] 2024-11-19T04:54:59,504 INFO [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c. 2024-11-19T04:54:59,504 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => f6a2e2ef47bf948c5ce3e0e7a516d28c, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c.', STARTKEY => '', ENDKEY => ''} 2024-11-19T04:54:59,505 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath f6a2e2ef47bf948c5ce3e0e7a516d28c 2024-11-19T04:54:59,505 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:54:59,505 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for f6a2e2ef47bf948c5ce3e0e7a516d28c 2024-11-19T04:54:59,505 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for f6a2e2ef47bf948c5ce3e0e7a516d28c 2024-11-19T04:54:59,507 INFO [StoreOpener-f6a2e2ef47bf948c5ce3e0e7a516d28c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region f6a2e2ef47bf948c5ce3e0e7a516d28c 2024-11-19T04:54:59,508 INFO [StoreOpener-f6a2e2ef47bf948c5ce3e0e7a516d28c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f6a2e2ef47bf948c5ce3e0e7a516d28c columnFamilyName info 2024-11-19T04:54:59,508 DEBUG [StoreOpener-f6a2e2ef47bf948c5ce3e0e7a516d28c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:54:59,509 INFO [StoreOpener-f6a2e2ef47bf948c5ce3e0e7a516d28c-1 {}] regionserver.HStore(327): Store=f6a2e2ef47bf948c5ce3e0e7a516d28c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T04:54:59,509 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for f6a2e2ef47bf948c5ce3e0e7a516d28c 2024-11-19T04:54:59,510 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c 2024-11-19T04:54:59,510 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c 2024-11-19T04:54:59,511 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for f6a2e2ef47bf948c5ce3e0e7a516d28c 2024-11-19T04:54:59,511 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for f6a2e2ef47bf948c5ce3e0e7a516d28c 2024-11-19T04:54:59,513 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for f6a2e2ef47bf948c5ce3e0e7a516d28c 2024-11-19T04:54:59,515 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T04:54:59,515 INFO [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened f6a2e2ef47bf948c5ce3e0e7a516d28c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=722482, jitterRate=-0.0813177227973938}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T04:54:59,515 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f6a2e2ef47bf948c5ce3e0e7a516d28c 2024-11-19T04:54:59,516 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for f6a2e2ef47bf948c5ce3e0e7a516d28c: Running coprocessor pre-open hook at 1731992099505Writing region info on filesystem at 1731992099505Initializing all the Stores at 1731992099506 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992099506Cleaning up temporary data from old regions at 1731992099511 (+5 ms)Running coprocessor post-open hooks at 1731992099515 (+4 ms)Region opened successfully at 1731992099516 (+1 ms) 2024-11-19T04:54:59,517 INFO [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c., pid=6, masterSystemTime=1731992099500 2024-11-19T04:54:59,519 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c. 2024-11-19T04:54:59,520 INFO [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c. 2024-11-19T04:54:59,520 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=f6a2e2ef47bf948c5ce3e0e7a516d28c, regionState=OPEN, openSeqNum=2, regionLocation=08a7f35e60d4,38579,1731992098058 2024-11-19T04:54:59,523 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure f6a2e2ef47bf948c5ce3e0e7a516d28c, server=08a7f35e60d4,38579,1731992098058 because future has completed 2024-11-19T04:54:59,527 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-19T04:54:59,527 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure f6a2e2ef47bf948c5ce3e0e7a516d28c, server=08a7f35e60d4,38579,1731992098058 in 178 msec 2024-11-19T04:54:59,529 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-19T04:54:59,529 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=f6a2e2ef47bf948c5ce3e0e7a516d28c, ASSIGN in 338 msec 2024-11-19T04:54:59,530 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T04:54:59,531 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731992099530"}]},"ts":"1731992099530"} 2024-11-19T04:54:59,533 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-19T04:54:59,534 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T04:54:59,536 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 374 msec 2024-11-19T04:55:04,405 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T04:55:04,425 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:55:04,427 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:55:04,428 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:55:04,436 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-19T04:55:07,146 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T04:55:07,146 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-19T04:55:07,147 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-19T04:55:07,147 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-19T04:55:07,148 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T04:55:07,148 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-19T04:55:07,148 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-19T04:55:07,148 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-19T04:55:09,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45139 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T04:55:09,218 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-19T04:55:09,218 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at 
row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-19T04:55:09,222 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-19T04:55:09,222 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c. 2024-11-19T04:55:09,236 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:55:09,239 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T04:55:09,239 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T04:55:09,239 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T04:55:09,240 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T04:55:09,240 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ff5703b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/hadoop.log.dir/,AVAILABLE} 2024-11-19T04:55:09,240 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@36d0b5ff{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T04:55:09,355 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@9038e26{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/java.io.tmpdir/jetty-localhost-41289-hadoop-hdfs-3_4_1-tests_jar-_-any-3162399066196343741/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:55:09,356 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@501a5826{HTTP/1.1, (http/1.1)}{localhost:41289} 2024-11-19T04:55:09,356 INFO [Time-limited test {}] server.Server(415): Started @117401ms 2024-11-19T04:55:09,357 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T04:55:09,388 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:55:09,392 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T04:55:09,393 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T04:55:09,393 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T04:55:09,393 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T04:55:09,394 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@167a7fde{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/hadoop.log.dir/,AVAILABLE} 2024-11-19T04:55:09,394 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@712f5f14{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T04:55:09,448 WARN [Thread-827 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data5/current/BP-39784048-172.17.0.2-1731992096997/current, will proceed with Du for space computation calculation, 2024-11-19T04:55:09,448 WARN [Thread-828 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data6/current/BP-39784048-172.17.0.2-1731992096997/current, will proceed with Du for space computation calculation, 2024-11-19T04:55:09,472 WARN [Thread-807 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T04:55:09,474 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe4897c73002bb05e with lease ID 0xb3ec429c80b48455: Processing first storage report for DS-5eda46f5-e4b4-4b90-9fdf-bb6edd28d6df from datanode DatanodeRegistration(127.0.0.1:35747, datanodeUuid=25819d07-eb75-435a-ad81-b178b212ee16, infoPort=43337, infoSecurePort=0, ipcPort=46517, storageInfo=lv=-57;cid=testClusterID;nsid=85150078;c=1731992096997) 2024-11-19T04:55:09,474 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe4897c73002bb05e with lease ID 0xb3ec429c80b48455: from storage DS-5eda46f5-e4b4-4b90-9fdf-bb6edd28d6df node DatanodeRegistration(127.0.0.1:35747, datanodeUuid=25819d07-eb75-435a-ad81-b178b212ee16, infoPort=43337, infoSecurePort=0, ipcPort=46517, storageInfo=lv=-57;cid=testClusterID;nsid=85150078;c=1731992096997), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:55:09,475 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe4897c73002bb05e with lease ID 0xb3ec429c80b48455: Processing first storage report for DS-88b828bf-ed3f-40a0-aefe-32ac6974d119 from datanode DatanodeRegistration(127.0.0.1:35747, datanodeUuid=25819d07-eb75-435a-ad81-b178b212ee16, infoPort=43337, infoSecurePort=0, ipcPort=46517, storageInfo=lv=-57;cid=testClusterID;nsid=85150078;c=1731992096997) 2024-11-19T04:55:09,475 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe4897c73002bb05e with lease ID 0xb3ec429c80b48455: from storage DS-88b828bf-ed3f-40a0-aefe-32ac6974d119 node DatanodeRegistration(127.0.0.1:35747, datanodeUuid=25819d07-eb75-435a-ad81-b178b212ee16, infoPort=43337, infoSecurePort=0, ipcPort=46517, storageInfo=lv=-57;cid=testClusterID;nsid=85150078;c=1731992096997), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-19T04:55:09,511 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@272698f5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/java.io.tmpdir/jetty-localhost-46449-hadoop-hdfs-3_4_1-tests_jar-_-any-16947901435748059833/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:55:09,512 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@15bbd738{HTTP/1.1, (http/1.1)}{localhost:46449} 2024-11-19T04:55:09,512 INFO [Time-limited test {}] server.Server(415): Started @117557ms 2024-11-19T04:55:09,513 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T04:55:09,551 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:55:09,555 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T04:55:09,556 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T04:55:09,556 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T04:55:09,556 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T04:55:09,556 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1dc59954{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/hadoop.log.dir/,AVAILABLE} 2024-11-19T04:55:09,557 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68004957{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T04:55:09,613 WARN [Thread-862 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data7/current/BP-39784048-172.17.0.2-1731992096997/current, will proceed with Du for space computation calculation, 2024-11-19T04:55:09,613 WARN [Thread-863 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data8/current/BP-39784048-172.17.0.2-1731992096997/current, will proceed with Du for space computation calculation, 2024-11-19T04:55:09,631 WARN [Thread-842 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T04:55:09,634 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x50275047013f68db with lease ID 0xb3ec429c80b48456: Processing first storage report for DS-815f78d8-f80b-45d3-92d4-6298459f6366 from datanode DatanodeRegistration(127.0.0.1:44731, datanodeUuid=c235e46d-bbeb-4660-9d16-4cb9ad997b48, infoPort=37081, infoSecurePort=0, ipcPort=45151, storageInfo=lv=-57;cid=testClusterID;nsid=85150078;c=1731992096997) 2024-11-19T04:55:09,634 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x50275047013f68db with lease ID 0xb3ec429c80b48456: from storage DS-815f78d8-f80b-45d3-92d4-6298459f6366 node DatanodeRegistration(127.0.0.1:44731, datanodeUuid=c235e46d-bbeb-4660-9d16-4cb9ad997b48, infoPort=37081, infoSecurePort=0, ipcPort=45151, storageInfo=lv=-57;cid=testClusterID;nsid=85150078;c=1731992096997), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:55:09,634 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x50275047013f68db with lease ID 0xb3ec429c80b48456: Processing first storage report for DS-7ecf79d7-439a-4f31-bc75-362f336493f4 from datanode DatanodeRegistration(127.0.0.1:44731, datanodeUuid=c235e46d-bbeb-4660-9d16-4cb9ad997b48, infoPort=37081, infoSecurePort=0, ipcPort=45151, storageInfo=lv=-57;cid=testClusterID;nsid=85150078;c=1731992096997) 2024-11-19T04:55:09,634 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x50275047013f68db with lease ID 0xb3ec429c80b48456: from storage DS-7ecf79d7-439a-4f31-bc75-362f336493f4 node DatanodeRegistration(127.0.0.1:44731, datanodeUuid=c235e46d-bbeb-4660-9d16-4cb9ad997b48, infoPort=37081, infoSecurePort=0, ipcPort=45151, storageInfo=lv=-57;cid=testClusterID;nsid=85150078;c=1731992096997), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:55:09,673 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@28246fba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/java.io.tmpdir/jetty-localhost-34597-hadoop-hdfs-3_4_1-tests_jar-_-any-1666888712800869591/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:55:09,674 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5056747b{HTTP/1.1, (http/1.1)}{localhost:34597} 2024-11-19T04:55:09,674 INFO [Time-limited test {}] server.Server(415): Started @117719ms 2024-11-19T04:55:09,675 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-19T04:55:09,775 WARN [Thread-888 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data9/current/BP-39784048-172.17.0.2-1731992096997/current, will proceed with Du for space computation calculation, 2024-11-19T04:55:09,776 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data10/current/BP-39784048-172.17.0.2-1731992096997/current, will proceed with Du for space computation calculation, 2024-11-19T04:55:09,792 WARN [Thread-877 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T04:55:09,795 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf73c7a4e407682ac with lease ID 0xb3ec429c80b48457: Processing first storage report for DS-96e7db7d-603b-4c2c-a816-38776992632a from datanode DatanodeRegistration(127.0.0.1:42129, datanodeUuid=b250df89-ba74-43a3-9854-61d807b9d04f, infoPort=39349, infoSecurePort=0, ipcPort=36265, storageInfo=lv=-57;cid=testClusterID;nsid=85150078;c=1731992096997) 2024-11-19T04:55:09,795 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf73c7a4e407682ac with lease ID 0xb3ec429c80b48457: from storage DS-96e7db7d-603b-4c2c-a816-38776992632a node DatanodeRegistration(127.0.0.1:42129, datanodeUuid=b250df89-ba74-43a3-9854-61d807b9d04f, infoPort=39349, infoSecurePort=0, ipcPort=36265, storageInfo=lv=-57;cid=testClusterID;nsid=85150078;c=1731992096997), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:55:09,795 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf73c7a4e407682ac with lease ID 0xb3ec429c80b48457: Processing first storage report for DS-f02ee31f-4f76-460f-80ab-8070a2546ddf from datanode DatanodeRegistration(127.0.0.1:42129, datanodeUuid=b250df89-ba74-43a3-9854-61d807b9d04f, infoPort=39349, infoSecurePort=0, ipcPort=36265, storageInfo=lv=-57;cid=testClusterID;nsid=85150078;c=1731992096997) 2024-11-19T04:55:09,795 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf73c7a4e407682ac with lease ID 0xb3ec429c80b48457: from storage DS-f02ee31f-4f76-460f-80ab-8070a2546ddf node DatanodeRegistration(127.0.0.1:42129, datanodeUuid=b250df89-ba74-43a3-9854-61d807b9d04f, infoPort=39349, infoSecurePort=0, ipcPort=36265, storageInfo=lv=-57;cid=testClusterID;nsid=85150078;c=1731992096997), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:55:09,896 WARN [ResponseProcessor for block BP-39784048-172.17.0.2-1731992096997:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-39784048-172.17.0.2-1731992096997:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:09,896 WARN [ResponseProcessor for block BP-39784048-172.17.0.2-1731992096997:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-39784048-172.17.0.2-1731992096997:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-39784048-172.17.0.2-1731992096997:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:38875,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:09,896 WARN [ResponseProcessor for block BP-39784048-172.17.0.2-1731992096997:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-39784048-172.17.0.2-1731992096997:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-39784048-172.17.0.2-1731992096997:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:38875,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:09,896 WARN [ResponseProcessor for block BP-39784048-172.17.0.2-1731992096997:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-39784048-172.17.0.2-1731992096997:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:09,897 WARN [DataStreamer for file /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 block BP-39784048-172.17.0.2-1731992096997:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK], DatanodeInfoWithStorage[127.0.0.1:38875,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38875,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK]) is bad. 2024-11-19T04:55:09,897 WARN [DataStreamer for file /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta block BP-39784048-172.17.0.2-1731992096997:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK], DatanodeInfoWithStorage[127.0.0.1:38875,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38875,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK]) is bad. 
2024-11-19T04:55:09,897 WARN [DataStreamer for file /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/WALs/08a7f35e60d4,45139,1731992097985/08a7f35e60d4%2C45139%2C1731992097985.1731992098183 block BP-39784048-172.17.0.2-1731992096997:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38875,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK], DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38875,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK]) is bad. 2024-11-19T04:55:09,897 WARN [DataStreamer for file /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992098471 block BP-39784048-172.17.0.2-1731992096997:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38875,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK], DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38875,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK]) is bad. 2024-11-19T04:55:09,898 WARN [PacketResponder: BP-39784048-172.17.0.2-1731992096997:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:38875] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:09,897 WARN [PacketResponder: BP-39784048-172.17.0.2-1731992096997:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:38875] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] 
at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:09,899 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:60288 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:38875:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60288 dst: /127.0.0.1:38875 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:09,899 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_276590532_22 at /127.0.0.1:60252 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:38875:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60252 dst: /127.0.0.1:38875 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:09,899 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:60294 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:38875:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60294 dst: /127.0.0.1:38875 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] 
at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:09,899 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:52008 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:43353:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52008 dst: /127.0.0.1:43353 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:09,900 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1109633094_22 at /127.0.0.1:60306 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:38875:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60306 dst: /127.0.0.1:38875 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:09,900 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:52004 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43353:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52004 dst: /127.0.0.1:43353 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:09,901 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1109633094_22 at /127.0.0.1:52026 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:43353:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52026 dst: /127.0.0.1:43353 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T04:55:09,901 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@55c8142a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:55:09,902 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@65f2c48f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T04:55:09,902 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T04:55:09,902 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6471b09b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T04:55:09,902 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@198c3788{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/hadoop.log.dir/,STOPPED} 2024-11-19T04:55:09,903 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-19T04:55:09,903 WARN [BP-39784048-172.17.0.2-1731992096997 heartbeating to localhost/127.0.0.1:41423 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T04:55:09,903 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T04:55:09,903 WARN [BP-39784048-172.17.0.2-1731992096997 heartbeating to localhost/127.0.0.1:41423 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-39784048-172.17.0.2-1731992096997 (Datanode Uuid 85b1f523-508c-4d33-9736-eb00eb9ff733) service to localhost/127.0.0.1:41423 2024-11-19T04:55:09,904 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data3/current/BP-39784048-172.17.0.2-1731992096997 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:55:09,904 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data4/current/BP-39784048-172.17.0.2-1731992096997 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:55:09,904 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_276590532_22 at /127.0.0.1:51984 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:43353:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51984 dst: /127.0.0.1:43353 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:09,905 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T04:55:09,908 WARN [DataStreamer for file /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 block BP-39784048-172.17.0.2-1731992096997:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:09,908 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@e529189 {}] datanode.DataXceiver(331): 127.0.0.1:43353:DataXceiver error processing unknown operation src: /127.0.0.1:42228 dst: /127.0.0.1:43353 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T04:55:09,908 WARN [DataStreamer for file /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/WALs/08a7f35e60d4,45139,1731992097985/08a7f35e60d4%2C45139%2C1731992097985.1731992098183 block BP-39784048-172.17.0.2-1731992096997:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:09,909 WARN [DataStreamer for file /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta block BP-39784048-172.17.0.2-1731992096997:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T04:55:09,909 WARN [DataStreamer for file /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992098471 block BP-39784048-172.17.0.2-1731992096997:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:09,915 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1aa07d80{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:55:09,915 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7c814f59{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T04:55:09,915 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T04:55:09,916 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5917cb43{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T04:55:09,916 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c3d2a60{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/hadoop.log.dir/,STOPPED} 2024-11-19T04:55:09,917 WARN [BP-39784048-172.17.0.2-1731992096997 heartbeating to localhost/127.0.0.1:41423 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T04:55:09,917 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T04:55:09,917 WARN [BP-39784048-172.17.0.2-1731992096997 heartbeating to localhost/127.0.0.1:41423 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-39784048-172.17.0.2-1731992096997 (Datanode Uuid 510b84df-c9b5-4629-b343-b6c817b1ee7c) service to localhost/127.0.0.1:41423 2024-11-19T04:55:09,917 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T04:55:09,918 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data1/current/BP-39784048-172.17.0.2-1731992096997 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:55:09,918 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data2/current/BP-39784048-172.17.0.2-1731992096997 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:55:09,919 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T04:55:09,922 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c., hostname=08a7f35e60d4,38579,1731992098058, seqNum=2] 2024-11-19T04:55:09,924 ERROR [FSHLog-0-hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7-prefix:08a7f35e60d4,38579,1731992098058 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:09,924 WARN [FSHLog-0-hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7-prefix:08a7f35e60d4,38579,1731992098058 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:09,924 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:09,924 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 08a7f35e60d4%2C38579%2C1731992098058:(num 1731992098471) roll requested 2024-11-19T04:55:09,925 INFO [regionserver/08a7f35e60d4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C38579%2C1731992098058.1731992109925 2024-11-19T04:55:09,931 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:09,931 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:09,931 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:09,931 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:09,931 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:09,931 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992098471 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992109925 2024-11-19T04:55:09,932 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:09,932 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:09,932 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37081:37081),(127.0.0.1/127.0.0.1:39349:39349)] 2024-11-19T04:55:09,932 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992098471 is not closed yet, will try archiving it next time 2024-11-19T04:55:09,933 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-19T04:55:09,933 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-19T04:55:09,933 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992098471 2024-11-19T04:55:09,937 WARN [IPC Server handler 3 on default port 41423 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992098471 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-11-19T04:55:09,941 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992098471 after 6ms 2024-11-19T04:55:10,163 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:11,117 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:11,932 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:11,934 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992109925 2024-11-19T04:55:11,934 WARN [ResponseProcessor for block BP-39784048-172.17.0.2-1731992096997:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-39784048-172.17.0.2-1731992096997:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:11,935 WARN [DataStreamer for file /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992109925 block BP-39784048-172.17.0.2-1731992096997:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK], DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK]) is bad. 2024-11-19T04:55:11,935 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:51028 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:44731:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51028 dst: /127.0.0.1:44731 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:11,936 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:40728 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:42129:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40728 dst: /127.0.0.1:42129 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
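The RecoverLeaseFSUtils records earlier in this excerpt show the Close-WAL-Writer thread asking the NameNode to recover the lease on the old WAL file, getting "Lease recovery is in progress" back, and scheduling a retry ("Failed to recover lease, attempt=0 ... after 6ms"). A minimal sketch of that retry pattern, assuming an HDFS DistributedFileSystem handle and an illustrative file path; this is only the underlying recoverLease() call that RecoverLeaseFSUtils wraps, not the HBase implementation itself:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class RecoverWalLease {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // NameNode address and path are illustrative; the real WAL path appears in the log above.
            URI nameNode = URI.create("hdfs://localhost:41423");
            Path wal = new Path(args.length > 0 ? args[0] : "/user/jenkins/wal-to-recover");

            try (FileSystem fs = FileSystem.get(nameNode, conf)) {
                DistributedFileSystem dfs = (DistributedFileSystem) fs;
                // recoverLease() returns true once the file is closed; while block recovery
                // (the RecoveryId in the log) is still in progress it returns false, so retry.
                boolean recovered = dfs.recoverLease(wal);
                for (int attempt = 1; !recovered && attempt < 10; attempt++) {
                    Thread.sleep(4000L); // back off, roughly like the ~4s gap between attempts in the log
                    recovered = dfs.recoverLease(wal);
                }
                System.out.println("lease recovered: " + recovered);
            }
        }
    }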
2024-11-19T04:55:11,937 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@272698f5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:55:11,937 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@15bbd738{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T04:55:11,937 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T04:55:11,937 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@712f5f14{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T04:55:11,937 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@167a7fde{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/hadoop.log.dir/,STOPPED} 2024-11-19T04:55:11,939 WARN [BP-39784048-172.17.0.2-1731992096997 heartbeating to localhost/127.0.0.1:41423 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T04:55:11,939 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-19T04:55:11,939 WARN [BP-39784048-172.17.0.2-1731992096997 heartbeating to localhost/127.0.0.1:41423 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-39784048-172.17.0.2-1731992096997 (Datanode Uuid c235e46d-bbeb-4660-9d16-4cb9ad997b48) service to localhost/127.0.0.1:41423 2024-11-19T04:55:11,939 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T04:55:11,939 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data7/current/BP-39784048-172.17.0.2-1731992096997 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:55:11,939 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data8/current/BP-39784048-172.17.0.2-1731992096997 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:55:11,940 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T04:55:12,163 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:13,117 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:13,933 WARN [regionserver/08a7f35e60d4:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK]] 2024-11-19T04:55:13,933 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:13,933 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 08a7f35e60d4%2C38579%2C1731992098058:(num 1731992109925) roll requested 2024-11-19T04:55:13,934 INFO [regionserver/08a7f35e60d4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C38579%2C1731992098058.1731992113933 2024-11-19T04:55:13,937 WARN [Thread-909 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:13,937 WARN [Thread-909 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK], DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]) is bad. 2024-11-19T04:55:13,937 WARN [Thread-909 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741839_1021 2024-11-19T04:55:13,940 WARN [Thread-909 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK] 2024-11-19T04:55:13,942 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992098471 after 4009ms 2024-11-19T04:55:13,944 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-19T04:55:13,947 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:13,947 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:13,947 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:13,948 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:13,948 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:13,948 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992109925 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992113933 2024-11-19T04:55:13,951 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43337:43337),(127.0.0.1/127.0.0.1:39349:39349)] 2024-11-19T04:55:13,951 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992098471 is not closed yet, will try archiving it next time 2024-11-19T04:55:13,951 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992109925 is not closed yet, will try archiving it next time 2024-11-19T04:55:13,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42129 is added to blk_1073741838_1020 (size=3600) 2024-11-19T04:55:14,163 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:14,353 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992098471 is not closed yet, will try archiving it next time 2024-11-19T04:55:15,117 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:15,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741838_1020 (size=3600) 2024-11-19T04:55:15,948 WARN [ResponseProcessor for block BP-39784048-172.17.0.2-1731992096997:blk_1073741840_1022 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-39784048-172.17.0.2-1731992096997:blk_1073741840_1022 java.io.IOException: Bad response ERROR for BP-39784048-172.17.0.2-1731992096997:blk_1073741840_1022 from datanode DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:15,949 WARN [DataStreamer for file /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992113933 block BP-39784048-172.17.0.2-1731992096997:blk_1073741840_1022 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35747,DS-5eda46f5-e4b4-4b90-9fdf-bb6edd28d6df,DISK], DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK]) is bad. 2024-11-19T04:55:15,949 WARN [PacketResponder: BP-39784048-172.17.0.2-1731992096997:blk_1073741840_1022, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:42129] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] 
at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:15,949 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36776 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:35747:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36776 dst: /127.0.0.1:35747 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:15,949 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:35462 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:42129:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35462 dst: /127.0.0.1:42129 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:15,950 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@28246fba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:55:15,951 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5056747b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T04:55:15,951 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T04:55:15,951 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68004957{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T04:55:15,951 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1dc59954{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/hadoop.log.dir/,STOPPED} 2024-11-19T04:55:15,952 WARN [regionserver/08a7f35e60d4:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35747,DS-5eda46f5-e4b4-4b90-9fdf-bb6edd28d6df,DISK]] 2024-11-19T04:55:15,952 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:15,952 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 08a7f35e60d4%2C38579%2C1731992098058:(num 1731992113933) roll requested 2024-11-19T04:55:15,953 INFO [regionserver/08a7f35e60d4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C38579%2C1731992098058.1731992115952 2024-11-19T04:55:15,954 WARN [BP-39784048-172.17.0.2-1731992096997 heartbeating to localhost/127.0.0.1:41423 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T04:55:15,954 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-19T04:55:15,954 WARN [BP-39784048-172.17.0.2-1731992096997 heartbeating to localhost/127.0.0.1:41423 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-39784048-172.17.0.2-1731992096997 (Datanode Uuid b250df89-ba74-43a3-9854-61d807b9d04f) service to localhost/127.0.0.1:41423 2024-11-19T04:55:15,954 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T04:55:15,955 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data9/current/BP-39784048-172.17.0.2-1731992096997 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:55:15,955 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data10/current/BP-39784048-172.17.0.2-1731992096997 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:55:15,955 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T04:55:15,956 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1024 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:15,957 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741841_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK], DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]) is bad. 2024-11-19T04:55:15,957 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741841_1024 2024-11-19T04:55:15,957 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK] 2024-11-19T04:55:15,958 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:15,958 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38875,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK], DatanodeInfoWithStorage[127.0.0.1:35747,DS-5eda46f5-e4b4-4b90-9fdf-bb6edd28d6df,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38875,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK]) is bad. 2024-11-19T04:55:15,958 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741842_1025 2024-11-19T04:55:15,959 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38875,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK] 2024-11-19T04:55:15,960 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:15,960 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK], DatanodeInfoWithStorage[127.0.0.1:35747,DS-5eda46f5-e4b4-4b90-9fdf-bb6edd28d6df,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK]) is bad. 2024-11-19T04:55:15,960 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741843_1026 2024-11-19T04:55:15,961 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK] 2024-11-19T04:55:15,964 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36806 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741844_1027] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data6]'}, localName='127.0.0.1:35747', datanodeUuid='25819d07-eb75-435a-ad81-b178b212ee16', xmitsInProgress=0}:Exception transferring block BP-39784048-172.17.0.2-1731992096997:blk_1073741844_1027 to mirror 127.0.0.1:44731 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:15,964 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44731 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:15,964 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36806 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741844_1027] {}] datanode.BlockReceiver(316): Block 1073741844 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-19T04:55:15,964 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35747,DS-5eda46f5-e4b4-4b90-9fdf-bb6edd28d6df,DISK], DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK]) is bad. 2024-11-19T04:55:15,964 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741844_1027 2024-11-19T04:55:15,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38579 {}] regionserver.HRegion(8855): Flush requested on f6a2e2ef47bf948c5ce3e0e7a516d28c 2024-11-19T04:55:15,964 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36806 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741844_1027] {}] datanode.DataXceiver(331): 127.0.0.1:35747:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36806 dst: /127.0.0.1:35747 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
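The repeated "All datanodes [...] are bad. Aborting..." failures and the createBlockOutputStream "Connection refused" retries above come from the DFSClient's pipeline-recovery logic running out of usable datanodes as the test shuts them down. Whether a client keeps writing with a shrunken pipeline or aborts the stream is governed by the standard dfs.client.block.write.replace-datanode-on-failure.* settings; a hedged sketch of setting them on a client Configuration (the values this particular test harness uses are not visible in the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class PipelineFailureTolerance {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // Ask the client to try replacing a failed datanode in the write pipeline.
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
            conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
            // If no replacement datanode can be found, keep writing with the remaining
            // replicas instead of failing the stream outright.
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);

            try (FileSystem fs = FileSystem.get(conf)) {
                System.out.println("client filesystem: " + fs.getUri());
            }
        }
    }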
2024-11-19T04:55:15,964 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f6a2e2ef47bf948c5ce3e0e7a516d28c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T04:55:15,964 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK] 2024-11-19T04:55:15,965 WARN [IPC Server handler 1 on default port 41423 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T04:55:15,965 WARN [IPC Server handler 1 on default port 41423 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T04:55:15,965 WARN [IPC Server handler 1 on default port 41423 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T04:55:15,969 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:15,969 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:15,969 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:15,969 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:15,969 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:15,970 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992113933 with entries=7, filesize=7.25 KB; new WAL /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992115952 2024-11-19T04:55:15,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741840_1023 (size=7430) 2024-11-19T04:55:15,975 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43337:43337)] 2024-11-19T04:55:15,975 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992098471 is not closed yet, will try archiving it next time 2024-11-19T04:55:15,975 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992113933 is not closed yet, will try archiving it next time 2024-11-19T04:55:15,982 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/.tmp/info/3d2b4d73137c484b9701a8091390569f is 1080, key is row0002/info:/1731992111941/Put/seqid=0 2024-11-19T04:55:15,985 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43353 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:15,985 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36822 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data6]'}, localName='127.0.0.1:35747', datanodeUuid='25819d07-eb75-435a-ad81-b178b212ee16', xmitsInProgress=0}:Exception transferring block BP-39784048-172.17.0.2-1731992096997:blk_1073741846_1029 to mirror 127.0.0.1:43353 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
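The MemStoreFlusher/HFileWriterImpl lines above persist edits the test wrote earlier to 'TestLogRolling-testLogRollOnDatanodeDeath' (the region locator lookup for row0002 appears near the start of this excerpt, and the flushed cell key is row0002/info:/...). A minimal client-side sketch of such a write; the table name and column family 'info' are taken from the log, while the connection details and value are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteTestRow {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(
                     TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))) {
                // Each put is appended to the WAL first, then lands in the memstore;
                // a flush like the one above later writes it out as an HFile.
                Put put = new Put(Bytes.toBytes("row0002"));
                put.addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), Bytes.toBytes("value0002"));
                table.put(put);
            }
        }
    }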
2024-11-19T04:55:15,985 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35747,DS-5eda46f5-e4b4-4b90-9fdf-bb6edd28d6df,DISK], DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]) is bad. 2024-11-19T04:55:15,985 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741846_1029 2024-11-19T04:55:15,985 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36822 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T04:55:15,985 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36822 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:35747:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36822 dst: /127.0.0.1:35747 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:15,986 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK] 2024-11-19T04:55:15,988 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38875 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
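The "HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL." messages earlier in this excerpt are FSHLog's low-replication check: when the live pipeline shrinks below the tolerated replica count, a roll is requested so subsequent appends go to a freshly allocated block with a full pipeline. The thresholds involved are configurable; a hedged sketch assuming the usual hbase-site keys (values shown are the common defaults, not settings read from this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalLowReplicationTuning {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Roughly: the minimum live replica count the WAL pipeline may drop to before a
            // roll is requested; by default it follows the filesystem's replication (2 here).
            conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2);
            // Roughly: how many consecutive low-replication-triggered rolls are attempted
            // before the WAL stops requesting further rolls for the same condition.
            conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5);
            System.out.println("tolerable low replication = "
                + conf.getInt("hbase.regionserver.hlog.tolerable.lowreplication", -1));
        }
    }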
2024-11-19T04:55:15,988 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36830 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741847_1030] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data6]'}, localName='127.0.0.1:35747', datanodeUuid='25819d07-eb75-435a-ad81-b178b212ee16', xmitsInProgress=0}:Exception transferring block BP-39784048-172.17.0.2-1731992096997:blk_1073741847_1030 to mirror 127.0.0.1:38875 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:15,988 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35747,DS-5eda46f5-e4b4-4b90-9fdf-bb6edd28d6df,DISK], DatanodeInfoWithStorage[127.0.0.1:38875,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38875,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK]) is bad. 2024-11-19T04:55:15,988 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741847_1030 2024-11-19T04:55:15,988 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36830 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741847_1030] {}] datanode.BlockReceiver(316): Block 1073741847 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T04:55:15,988 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36830 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741847_1030] {}] datanode.DataXceiver(331): 127.0.0.1:35747:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36830 dst: /127.0.0.1:35747 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:15,989 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38875,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK] 2024-11-19T04:55:15,990 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:15,990 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK], DatanodeInfoWithStorage[127.0.0.1:35747,DS-5eda46f5-e4b4-4b90-9fdf-bb6edd28d6df,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK]) is bad. 2024-11-19T04:55:15,990 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741848_1031 2024-11-19T04:55:15,991 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK] 2024-11-19T04:55:15,993 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36838 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741849_1032] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data6]'}, localName='127.0.0.1:35747', datanodeUuid='25819d07-eb75-435a-ad81-b178b212ee16', xmitsInProgress=0}:Exception transferring block BP-39784048-172.17.0.2-1731992096997:blk_1073741849_1032 to mirror 127.0.0.1:42129 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:15,993 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42129 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:15,993 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36838 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741849_1032] {}] datanode.BlockReceiver(316): Block 1073741849 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T04:55:15,993 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35747,DS-5eda46f5-e4b4-4b90-9fdf-bb6edd28d6df,DISK], DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK]) is bad. 2024-11-19T04:55:15,993 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741849_1032 2024-11-19T04:55:15,993 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36838 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741849_1032] {}] datanode.DataXceiver(331): 127.0.0.1:35747:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36838 dst: /127.0.0.1:35747 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:15,994 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK] 2024-11-19T04:55:15,994 WARN [IPC Server handler 1 on default port 41423 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T04:55:15,994 WARN [IPC Server handler 1 on default port 41423 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T04:55:15,995 WARN [IPC Server handler 1 on default port 41423 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T04:55:15,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741850_1033 (size=10347) 2024-11-19T04:55:16,164 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T04:55:16,372 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992098471 is not closed yet, will try archiving it next time 2024-11-19T04:55:16,398 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/.tmp/info/3d2b4d73137c484b9701a8091390569f 2024-11-19T04:55:16,405 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/.tmp/info/3d2b4d73137c484b9701a8091390569f as hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/info/3d2b4d73137c484b9701a8091390569f 2024-11-19T04:55:16,411 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/info/3d2b4d73137c484b9701a8091390569f, entries=5, sequenceid=11, filesize=10.1 K 2024-11-19T04:55:16,412 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for f6a2e2ef47bf948c5ce3e0e7a516d28c in 448ms, sequenceid=11, compaction requested=false 2024-11-19T04:55:16,412 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f6a2e2ef47bf948c5ce3e0e7a516d28c: 2024-11-19T04:55:16,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38579 {}] regionserver.HRegion(8855): Flush requested on f6a2e2ef47bf948c5ce3e0e7a516d28c 2024-11-19T04:55:16,590 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f6a2e2ef47bf948c5ce3e0e7a516d28c 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-19T04:55:16,594 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/.tmp/info/f2e562f5978948e18bb69517afdedbda is 1080, key is row0007/info:/1731992115965/Put/seqid=0 2024-11-19T04:55:16,596 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:16,596 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38875,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK], DatanodeInfoWithStorage[127.0.0.1:35747,DS-5eda46f5-e4b4-4b90-9fdf-bb6edd28d6df,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38875,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK]) is bad. 2024-11-19T04:55:16,596 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741851_1034 2024-11-19T04:55:16,597 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38875,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK] 2024-11-19T04:55:16,598 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:16,598 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK], DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]) is bad. 2024-11-19T04:55:16,598 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741852_1035 2024-11-19T04:55:16,598 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK] 2024-11-19T04:55:16,601 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42129 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:16,601 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36864 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741853_1036] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data6]'}, localName='127.0.0.1:35747', datanodeUuid='25819d07-eb75-435a-ad81-b178b212ee16', xmitsInProgress=0}:Exception transferring block BP-39784048-172.17.0.2-1731992096997:blk_1073741853_1036 to mirror 127.0.0.1:42129 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:16,601 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35747,DS-5eda46f5-e4b4-4b90-9fdf-bb6edd28d6df,DISK], DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK]) is bad. 2024-11-19T04:55:16,601 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741853_1036 2024-11-19T04:55:16,601 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36864 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741853_1036] {}] datanode.BlockReceiver(316): Block 1073741853 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T04:55:16,601 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36864 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741853_1036] {}] datanode.DataXceiver(331): 127.0.0.1:35747:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36864 dst: /127.0.0.1:35747 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:16,602 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK] 2024-11-19T04:55:16,603 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:16,603 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK], DatanodeInfoWithStorage[127.0.0.1:35747,DS-5eda46f5-e4b4-4b90-9fdf-bb6edd28d6df,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK]) is bad. 
2024-11-19T04:55:16,603 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741854_1037 2024-11-19T04:55:16,603 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK] 2024-11-19T04:55:16,604 WARN [IPC Server handler 0 on default port 41423 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T04:55:16,604 WARN [IPC Server handler 0 on default port 41423 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T04:55:16,604 WARN [IPC Server handler 0 on default port 41423 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T04:55:16,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741855_1038 (size=12506) 2024-11-19T04:55:17,007 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/.tmp/info/f2e562f5978948e18bb69517afdedbda 2024-11-19T04:55:17,014 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/.tmp/info/f2e562f5978948e18bb69517afdedbda as hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/info/f2e562f5978948e18bb69517afdedbda 2024-11-19T04:55:17,021 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/info/f2e562f5978948e18bb69517afdedbda, entries=7, sequenceid=24, filesize=12.2 K 2024-11-19T04:55:17,022 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for f6a2e2ef47bf948c5ce3e0e7a516d28c in 432ms, sequenceid=24, compaction requested=false 2024-11-19T04:55:17,022 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
f6a2e2ef47bf948c5ce3e0e7a516d28c: 2024-11-19T04:55:17,022 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-19T04:55:17,023 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T04:55:17,023 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/info/f2e562f5978948e18bb69517afdedbda because midkey is the same as first or last row 2024-11-19T04:55:17,118 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:17,975 WARN [regionserver/08a7f35e60d4:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35747,DS-5eda46f5-e4b4-4b90-9fdf-bb6edd28d6df,DISK]] 2024-11-19T04:55:17,975 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:17,975 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 08a7f35e60d4%2C38579%2C1731992098058:(num 1731992115952) roll requested 2024-11-19T04:55:17,976 INFO [regionserver/08a7f35e60d4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C38579%2C1731992098058.1731992117976 2024-11-19T04:55:17,979 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:17,979 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK], DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK]) is bad. 2024-11-19T04:55:17,979 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741856_1039 2024-11-19T04:55:17,980 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK] 2024-11-19T04:55:17,981 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:17,982 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK], DatanodeInfoWithStorage[127.0.0.1:38875,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK]) is bad. 2024-11-19T04:55:17,982 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741857_1040 2024-11-19T04:55:17,982 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK] 2024-11-19T04:55:17,985 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43353 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:17,985 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36886 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741858_1041] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data6]'}, localName='127.0.0.1:35747', datanodeUuid='25819d07-eb75-435a-ad81-b178b212ee16', xmitsInProgress=0}:Exception transferring block BP-39784048-172.17.0.2-1731992096997:blk_1073741858_1041 to mirror 127.0.0.1:43353 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:17,985 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35747,DS-5eda46f5-e4b4-4b90-9fdf-bb6edd28d6df,DISK], DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]) is bad. 2024-11-19T04:55:17,985 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741858_1041 2024-11-19T04:55:17,985 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36886 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741858_1041] {}] datanode.BlockReceiver(316): Block 1073741858 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-11-19T04:55:17,985 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36886 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741858_1041] {}] datanode.DataXceiver(331): 127.0.0.1:35747:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36886 dst: /127.0.0.1:35747 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:17,985 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK] 2024-11-19T04:55:17,988 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38875 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:17,988 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36888 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741859_1042] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data6]'}, localName='127.0.0.1:35747', datanodeUuid='25819d07-eb75-435a-ad81-b178b212ee16', xmitsInProgress=0}:Exception transferring block BP-39784048-172.17.0.2-1731992096997:blk_1073741859_1042 to mirror 127.0.0.1:38875 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:17,988 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35747,DS-5eda46f5-e4b4-4b90-9fdf-bb6edd28d6df,DISK], DatanodeInfoWithStorage[127.0.0.1:38875,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38875,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK]) is bad. 2024-11-19T04:55:17,988 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741859_1042 2024-11-19T04:55:17,988 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36888 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741859_1042] {}] datanode.BlockReceiver(316): Block 1073741859 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-19T04:55:17,988 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36888 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741859_1042] {}] datanode.DataXceiver(331): 127.0.0.1:35747:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36888 dst: /127.0.0.1:35747 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T04:55:17,988 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38875,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK] 2024-11-19T04:55:17,989 WARN [IPC Server handler 3 on default port 41423 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T04:55:17,989 WARN [IPC Server handler 3 on default port 41423 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T04:55:17,989 WARN [IPC Server handler 3 on default port 41423 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T04:55:17,991 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:17,992 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:17,992 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:17,992 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:17,992 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:17,992 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992115952 with entries=17, filesize=17.07 KB; new WAL /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992117976 2024-11-19T04:55:17,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741845_1028 (size=17486) 2024-11-19T04:55:17,998 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43337:43337)] 2024-11-19T04:55:17,998 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992098471 is not closed yet, will try archiving it next time 2024-11-19T04:55:17,998 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992115952 is not closed yet, will try archiving it next time 2024-11-19T04:55:18,001 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992109925 to hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/oldWALs/08a7f35e60d4%2C38579%2C1731992098058.1731992109925 2024-11-19T04:55:18,002 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992113933 to hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/oldWALs/08a7f35e60d4%2C38579%2C1731992098058.1731992113933 2024-11-19T04:55:18,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38579 {}] regionserver.HRegion(8855): Flush requested on f6a2e2ef47bf948c5ce3e0e7a516d28c 2024-11-19T04:55:18,011 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f6a2e2ef47bf948c5ce3e0e7a516d28c 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-19T04:55:18,015 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/.tmp/info/118f4a12b35242d9a8aebcf7e684cc64 is 1079, key is tmprow/info:/1731992118009/Put/seqid=0 2024-11-19T04:55:18,017 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:18,017 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK], DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK]) is bad. 2024-11-19T04:55:18,017 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741861_1044 2024-11-19T04:55:18,018 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK] 2024-11-19T04:55:18,019 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:18,019 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38875,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK], DatanodeInfoWithStorage[127.0.0.1:35747,DS-5eda46f5-e4b4-4b90-9fdf-bb6edd28d6df,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38875,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK]) is bad. 2024-11-19T04:55:18,019 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741862_1045 2024-11-19T04:55:18,020 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38875,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK] 2024-11-19T04:55:18,021 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:18,021 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK], DatanodeInfoWithStorage[127.0.0.1:35747,DS-5eda46f5-e4b4-4b90-9fdf-bb6edd28d6df,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]) is bad. 2024-11-19T04:55:18,021 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741863_1046 2024-11-19T04:55:18,022 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK] 2024-11-19T04:55:18,023 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:18,023 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK], DatanodeInfoWithStorage[127.0.0.1:35747,DS-5eda46f5-e4b4-4b90-9fdf-bb6edd28d6df,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK]) is bad. 2024-11-19T04:55:18,023 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741864_1047 2024-11-19T04:55:18,023 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK] 2024-11-19T04:55:18,024 WARN [IPC Server handler 0 on default port 41423 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T04:55:18,024 WARN [IPC Server handler 0 on default port 41423 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T04:55:18,024 WARN [IPC Server handler 0 on default port 41423 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T04:55:18,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741865_1048 (size=6027) 2024-11-19T04:55:18,164 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:18,395 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992098471 is not closed yet, will try archiving it next time 2024-11-19T04:55:18,428 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/.tmp/info/118f4a12b35242d9a8aebcf7e684cc64 2024-11-19T04:55:18,435 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/.tmp/info/118f4a12b35242d9a8aebcf7e684cc64 as hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/info/118f4a12b35242d9a8aebcf7e684cc64 2024-11-19T04:55:18,442 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/info/118f4a12b35242d9a8aebcf7e684cc64, entries=1, sequenceid=34, filesize=5.9 K 2024-11-19T04:55:18,443 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for f6a2e2ef47bf948c5ce3e0e7a516d28c in 433ms, sequenceid=34, compaction requested=true 2024-11-19T04:55:18,443 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f6a2e2ef47bf948c5ce3e0e7a516d28c: 2024-11-19T04:55:18,444 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-19T04:55:18,444 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T04:55:18,444 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/info/f2e562f5978948e18bb69517afdedbda because midkey is the same as first or last row 2024-11-19T04:55:18,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f6a2e2ef47bf948c5ce3e0e7a516d28c:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T04:55:18,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T04:55:18,444 DEBUG [RS:0;08a7f35e60d4:38579-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting 
compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T04:55:18,445 DEBUG [RS:0;08a7f35e60d4:38579-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T04:55:18,445 DEBUG [RS:0;08a7f35e60d4:38579-shortCompactions-0 {}] regionserver.HStore(1541): f6a2e2ef47bf948c5ce3e0e7a516d28c/info is initiating minor compaction (all files) 2024-11-19T04:55:18,445 INFO [RS:0;08a7f35e60d4:38579-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f6a2e2ef47bf948c5ce3e0e7a516d28c/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c. 2024-11-19T04:55:18,446 INFO [RS:0;08a7f35e60d4:38579-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/info/3d2b4d73137c484b9701a8091390569f, hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/info/f2e562f5978948e18bb69517afdedbda, hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/info/118f4a12b35242d9a8aebcf7e684cc64] into tmpdir=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/.tmp, totalSize=28.2 K 2024-11-19T04:55:18,446 DEBUG [RS:0;08a7f35e60d4:38579-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3d2b4d73137c484b9701a8091390569f, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731992111941 2024-11-19T04:55:18,447 DEBUG [RS:0;08a7f35e60d4:38579-shortCompactions-0 {}] compactions.Compactor(225): Compacting f2e562f5978948e18bb69517afdedbda, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1731992115965 2024-11-19T04:55:18,447 DEBUG [RS:0;08a7f35e60d4:38579-shortCompactions-0 {}] compactions.Compactor(225): Compacting 118f4a12b35242d9a8aebcf7e684cc64, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731992118009 2024-11-19T04:55:18,462 INFO [RS:0;08a7f35e60d4:38579-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f6a2e2ef47bf948c5ce3e0e7a516d28c#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T04:55:18,463 DEBUG [RS:0;08a7f35e60d4:38579-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/.tmp/info/7c9b3a118e8f49e593e64fe53cd3b88a is 1080, key is row0002/info:/1731992111941/Put/seqid=0 2024-11-19T04:55:18,465 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36936 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741866_1049] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data6]'}, localName='127.0.0.1:35747', datanodeUuid='25819d07-eb75-435a-ad81-b178b212ee16', xmitsInProgress=0}:Exception transferring block BP-39784048-172.17.0.2-1731992096997:blk_1073741866_1049 to mirror 127.0.0.1:44731 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:18,466 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44731 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:18,466 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36936 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741866_1049] {}] datanode.BlockReceiver(316): Block 1073741866 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
2024-11-19T04:55:18,466 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35747,DS-5eda46f5-e4b4-4b90-9fdf-bb6edd28d6df,DISK], DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK]) is bad. 2024-11-19T04:55:18,466 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741866_1049 2024-11-19T04:55:18,466 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36936 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741866_1049] {}] datanode.DataXceiver(331): 127.0.0.1:35747:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36936 dst: /127.0.0.1:35747 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:18,466 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK] 2024-11-19T04:55:18,468 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43353 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T04:55:18,468 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36946 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741867_1050] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data6]'}, localName='127.0.0.1:35747', datanodeUuid='25819d07-eb75-435a-ad81-b178b212ee16', xmitsInProgress=0}:Exception transferring block BP-39784048-172.17.0.2-1731992096997:blk_1073741867_1050 to mirror 127.0.0.1:43353 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:18,469 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35747,DS-5eda46f5-e4b4-4b90-9fdf-bb6edd28d6df,DISK], DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]) is bad. 2024-11-19T04:55:18,469 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741867_1050 2024-11-19T04:55:18,469 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36946 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741867_1050] {}] datanode.BlockReceiver(316): Block 1073741867 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T04:55:18,469 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36946 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741867_1050] {}] datanode.DataXceiver(331): 127.0.0.1:35747:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36946 dst: /127.0.0.1:35747 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:18,469 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK] 2024-11-19T04:55:18,470 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:18,470 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38875,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK], DatanodeInfoWithStorage[127.0.0.1:35747,DS-5eda46f5-e4b4-4b90-9fdf-bb6edd28d6df,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38875,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK]) is bad. 2024-11-19T04:55:18,470 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741868_1051 2024-11-19T04:55:18,471 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38875,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK] 2024-11-19T04:55:18,472 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:18,472 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK], DatanodeInfoWithStorage[127.0.0.1:35747,DS-5eda46f5-e4b4-4b90-9fdf-bb6edd28d6df,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK]) is bad. 2024-11-19T04:55:18,472 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741869_1052 2024-11-19T04:55:18,472 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK] 2024-11-19T04:55:18,473 WARN [IPC Server handler 3 on default port 41423 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T04:55:18,473 WARN [IPC Server handler 3 on default port 41423 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T04:55:18,473 WARN [IPC Server handler 3 on default port 41423 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T04:55:18,475 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@625232db[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35747, datanodeUuid=25819d07-eb75-435a-ad81-b178b212ee16, infoPort=43337, infoSecurePort=0, ipcPort=46517, storageInfo=lv=-57;cid=testClusterID;nsid=85150078;c=1731992096997):Failed to transfer BP-39784048-172.17.0.2-1731992096997:blk_1073741840_1023 to 127.0.0.1:43353 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:18,479 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6c95d0e1[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35747, datanodeUuid=25819d07-eb75-435a-ad81-b178b212ee16, infoPort=43337, infoSecurePort=0, ipcPort=46517, storageInfo=lv=-57;cid=testClusterID;nsid=85150078;c=1731992096997):Failed to transfer BP-39784048-172.17.0.2-1731992096997:blk_1073741850_1033 to 127.0.0.1:38875 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:18,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741870_1053 (size=17994) 2024-11-19T04:55:18,891 DEBUG [RS:0;08a7f35e60d4:38579-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/.tmp/info/7c9b3a118e8f49e593e64fe53cd3b88a as hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/info/7c9b3a118e8f49e593e64fe53cd3b88a 2024-11-19T04:55:18,901 INFO [RS:0;08a7f35e60d4:38579-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f6a2e2ef47bf948c5ce3e0e7a516d28c/info of f6a2e2ef47bf948c5ce3e0e7a516d28c into 7c9b3a118e8f49e593e64fe53cd3b88a(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T04:55:18,901 DEBUG [RS:0;08a7f35e60d4:38579-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f6a2e2ef47bf948c5ce3e0e7a516d28c: 2024-11-19T04:55:18,901 INFO [RS:0;08a7f35e60d4:38579-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c., storeName=f6a2e2ef47bf948c5ce3e0e7a516d28c/info, priority=13, startTime=1731992118444; duration=0sec 2024-11-19T04:55:18,901 DEBUG [RS:0;08a7f35e60d4:38579-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-19T04:55:18,901 DEBUG [RS:0;08a7f35e60d4:38579-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T04:55:18,902 DEBUG [RS:0;08a7f35e60d4:38579-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/info/7c9b3a118e8f49e593e64fe53cd3b88a because midkey is the same as first or last row 2024-11-19T04:55:18,902 DEBUG [RS:0;08a7f35e60d4:38579-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-19T04:55:18,902 DEBUG [RS:0;08a7f35e60d4:38579-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T04:55:18,902 DEBUG [RS:0;08a7f35e60d4:38579-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/info/7c9b3a118e8f49e593e64fe53cd3b88a because midkey is the same as first or last row 2024-11-19T04:55:18,902 DEBUG [RS:0;08a7f35e60d4:38579-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-19T04:55:18,902 DEBUG [RS:0;08a7f35e60d4:38579-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T04:55:18,902 DEBUG [RS:0;08a7f35e60d4:38579-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/info/7c9b3a118e8f49e593e64fe53cd3b88a because midkey is the same as first or last row 2024-11-19T04:55:18,902 DEBUG [RS:0;08a7f35e60d4:38579-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T04:55:18,902 DEBUG [RS:0;08a7f35e60d4:38579-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f6a2e2ef47bf948c5ce3e0e7a516d28c:info 2024-11-19T04:55:19,118 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:19,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38579 {}] regionserver.HRegion(8855): Flush requested on f6a2e2ef47bf948c5ce3e0e7a516d28c 2024-11-19T04:55:19,434 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f6a2e2ef47bf948c5ce3e0e7a516d28c 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-19T04:55:19,440 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/.tmp/info/dc5e4ac1b3064c1c88caf63551da5e7a is 1079, key is tmprow/info:/1731992119432/Put/seqid=0 2024-11-19T04:55:19,442 WARN [Thread-960 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:19,443 WARN [Thread-960 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK], DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]) is bad. 2024-11-19T04:55:19,443 WARN [Thread-960 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741871_1054 2024-11-19T04:55:19,443 WARN [Thread-960 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK] 2024-11-19T04:55:19,445 WARN [Thread-960 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:19,445 WARN [Thread-960 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK], DatanodeInfoWithStorage[127.0.0.1:38875,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK]) is bad. 2024-11-19T04:55:19,445 WARN [Thread-960 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741872_1055 2024-11-19T04:55:19,446 WARN [Thread-960 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK] 2024-11-19T04:55:19,449 WARN [Thread-960 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38875 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:19,449 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36964 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741873_1056] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data6]'}, localName='127.0.0.1:35747', datanodeUuid='25819d07-eb75-435a-ad81-b178b212ee16', xmitsInProgress=0}:Exception transferring block BP-39784048-172.17.0.2-1731992096997:blk_1073741873_1056 to mirror 127.0.0.1:38875 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:19,449 WARN [Thread-960 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35747,DS-5eda46f5-e4b4-4b90-9fdf-bb6edd28d6df,DISK], DatanodeInfoWithStorage[127.0.0.1:38875,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38875,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK]) is bad. 2024-11-19T04:55:19,449 WARN [Thread-960 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741873_1056 2024-11-19T04:55:19,449 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36964 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741873_1056] {}] datanode.BlockReceiver(316): Block 1073741873 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T04:55:19,449 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36964 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741873_1056] {}] datanode.DataXceiver(331): 127.0.0.1:35747:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36964 dst: /127.0.0.1:35747 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:19,450 WARN [Thread-960 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38875,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK] 2024-11-19T04:55:19,453 WARN [Thread-960 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42129 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:19,453 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36966 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741874_1057] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data6]'}, localName='127.0.0.1:35747', datanodeUuid='25819d07-eb75-435a-ad81-b178b212ee16', xmitsInProgress=0}:Exception transferring block BP-39784048-172.17.0.2-1731992096997:blk_1073741874_1057 to mirror 127.0.0.1:42129 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:19,453 WARN [Thread-960 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35747,DS-5eda46f5-e4b4-4b90-9fdf-bb6edd28d6df,DISK], DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK]) is bad. 2024-11-19T04:55:19,453 WARN [Thread-960 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741874_1057 2024-11-19T04:55:19,453 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36966 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741874_1057] {}] datanode.BlockReceiver(316): Block 1073741874 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T04:55:19,453 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:36966 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741874_1057] {}] datanode.DataXceiver(331): 127.0.0.1:35747:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36966 dst: /127.0.0.1:35747 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:19,454 WARN [Thread-960 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK] 2024-11-19T04:55:19,454 WARN [IPC Server handler 2 on default port 41423 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T04:55:19,455 WARN [IPC Server handler 2 on default port 41423 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T04:55:19,455 WARN [IPC Server handler 2 on default port 41423 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T04:55:19,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741875_1058 (size=6027) 2024-11-19T04:55:19,474 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6c95d0e1[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35747, datanodeUuid=25819d07-eb75-435a-ad81-b178b212ee16, infoPort=43337, infoSecurePort=0, ipcPort=46517, storageInfo=lv=-57;cid=testClusterID;nsid=85150078;c=1731992096997):Failed to transfer BP-39784048-172.17.0.2-1731992096997:blk_1073741855_1038 to 127.0.0.1:38875 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:19,475 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@625232db[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35747, datanodeUuid=25819d07-eb75-435a-ad81-b178b212ee16, infoPort=43337, infoSecurePort=0, ipcPort=46517, storageInfo=lv=-57;cid=testClusterID;nsid=85150078;c=1731992096997):Failed to transfer BP-39784048-172.17.0.2-1731992096997:blk_1073741845_1028 to 127.0.0.1:43353 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T04:55:19,863 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/.tmp/info/dc5e4ac1b3064c1c88caf63551da5e7a 2024-11-19T04:55:19,870 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/.tmp/info/dc5e4ac1b3064c1c88caf63551da5e7a as hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/info/dc5e4ac1b3064c1c88caf63551da5e7a 2024-11-19T04:55:19,875 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/info/dc5e4ac1b3064c1c88caf63551da5e7a, entries=1, sequenceid=45, filesize=5.9 K 2024-11-19T04:55:19,876 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for f6a2e2ef47bf948c5ce3e0e7a516d28c in 442ms, sequenceid=45, compaction requested=false 2024-11-19T04:55:19,876 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f6a2e2ef47bf948c5ce3e0e7a516d28c: 2024-11-19T04:55:19,877 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-19T04:55:19,877 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T04:55:19,877 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/info/7c9b3a118e8f49e593e64fe53cd3b88a because midkey is the same as first or last row 2024-11-19T04:55:20,001 WARN [regionserver/08a7f35e60d4:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-19T04:55:20,001 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:20,050 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:55:20,057 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T04:55:20,060 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T04:55:20,060 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T04:55:20,060 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T04:55:20,062 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@568b1686{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/hadoop.log.dir/,AVAILABLE} 2024-11-19T04:55:20,063 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16a2580d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T04:55:20,165 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:20,192 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2852206a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/java.io.tmpdir/jetty-localhost-45845-hadoop-hdfs-3_4_1-tests_jar-_-any-18116948684688820987/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:55:20,193 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@184e2f57{HTTP/1.1, (http/1.1)}{localhost:45845} 2024-11-19T04:55:20,193 INFO [Time-limited test {}] server.Server(415): Started @128237ms 2024-11-19T04:55:20,194 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T04:55:20,345 WARN [Thread-980 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T04:55:20,354 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x79665449514e8ec with lease ID 0xb3ec429c80b48458: from storage DS-937871f2-4c41-459f-abfb-7cc01ab45ce0 node DatanodeRegistration(127.0.0.1:38163, datanodeUuid=85b1f523-508c-4d33-9736-eb00eb9ff733, infoPort=43321, infoSecurePort=0, ipcPort=39909, storageInfo=lv=-57;cid=testClusterID;nsid=85150078;c=1731992096997), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-19T04:55:20,354 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x79665449514e8ec with lease ID 0xb3ec429c80b48458: from storage DS-dd2f7d9a-e4bb-46ef-b82c-e580068999d8 node DatanodeRegistration(127.0.0.1:38163, datanodeUuid=85b1f523-508c-4d33-9736-eb00eb9ff733, infoPort=43321, infoSecurePort=0, ipcPort=39909, storageInfo=lv=-57;cid=testClusterID;nsid=85150078;c=1731992096997), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:55:21,118 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:21,476 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6c95d0e1[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35747, datanodeUuid=25819d07-eb75-435a-ad81-b178b212ee16, infoPort=43337, infoSecurePort=0, ipcPort=46517, storageInfo=lv=-57;cid=testClusterID;nsid=85150078;c=1731992096997):Failed to transfer BP-39784048-172.17.0.2-1731992096997:blk_1073741870_1053 to 127.0.0.1:44731 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T04:55:21,476 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@625232db[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35747, datanodeUuid=25819d07-eb75-435a-ad81-b178b212ee16, infoPort=43337, infoSecurePort=0, ipcPort=46517, storageInfo=lv=-57;cid=testClusterID;nsid=85150078;c=1731992096997):Failed to transfer BP-39784048-172.17.0.2-1731992096997:blk_1073741865_1048 to 127.0.0.1:44731 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:22,001 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:22,165 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:22,475 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@625232db[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35747, datanodeUuid=25819d07-eb75-435a-ad81-b178b212ee16, infoPort=43337, infoSecurePort=0, ipcPort=46517, storageInfo=lv=-57;cid=testClusterID;nsid=85150078;c=1731992096997):Failed to transfer BP-39784048-172.17.0.2-1731992096997:blk_1073741875_1058 to 127.0.0.1:44731 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:23,119 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:24,002 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:24,166 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:25,120 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:26,002 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:26,166 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:27,120 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:27,965 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-19T04:55:28,003 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:28,166 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:28,269 ERROR [FSHLog-0-hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData-prefix:08a7f35e60d4,45139,1731992097985 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:28,269 WARN [FSHLog-0-hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData-prefix:08a7f35e60d4,45139,1731992097985 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
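The FsDatasetAsyncDiskServiceFixer DEBUG line above ("NoSuchFieldException: threadGroup ... See HBASE-27595") records the test utility reaching into a Hadoop-internal class by reflection and tolerating the field being absent on newer Hadoop versions. Below is an illustrative-only sketch of that defensive-reflection pattern in plain java.lang.reflect; the class and field names are placeholders, not the actual Hadoop internals that HBaseTestingUtil touches.

```java
import java.lang.reflect.Field;

// Probe for a private field that a newer release may have renamed or removed,
// and skip the fix-up instead of failing, mirroring the DEBUG message above.
public class OptionalFieldProbe {
  static Field findField(Class<?> clazz, String name) {
    for (Class<?> c = clazz; c != null; c = c.getSuperclass()) {
      try {
        Field f = c.getDeclaredField(name);
        f.setAccessible(true);
        return f;
      } catch (NoSuchFieldException e) {
        // not declared here; keep walking up the class hierarchy
      }
    }
    return null; // absent on this version; caller should log and carry on
  }

  public static void main(String[] args) {
    Field f = findField(Thread.class, "threadGroup"); // placeholder target only
    System.out.println(f != null ? "field present: " + f : "field absent, skipping fix-up");
  }
}
```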
2024-11-19T04:55:28,269 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 08a7f35e60d4%2C45139%2C1731992097985:(num 1731992098183) roll requested 2024-11-19T04:55:28,270 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C45139%2C1731992097985.1731992128270 2024-11-19T04:55:28,273 WARN [Thread-1000 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:28,273 WARN [Thread-1000 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK], DatanodeInfoWithStorage[127.0.0.1:38163,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]) is bad. 2024-11-19T04:55:28,273 WARN [Thread-1000 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741876_1059 2024-11-19T04:55:28,273 WARN [Thread-1000 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK] 2024-11-19T04:55:28,275 WARN [Thread-1000 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
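After the appendAndSync IOException above, the master:store-WAL-Roller requests a roll and opens a new writer on the surviving datanodes. The sketch below shows the client-visible equivalent of that operation through the public Admin API; it is not the roller's internal code, and the host, port, and startcode in ServerName are placeholders copied from this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch only: ask a region server to close its current WAL and open a new one.
public class ManualWalRoll {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      ServerName rs = ServerName.valueOf("08a7f35e60d4", 38579, 1731992098058L); // placeholder
      admin.rollWALWriter(rs);
    }
  }
}
```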
2024-11-19T04:55:28,275 WARN [Thread-1000 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK], DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK]) is bad. 2024-11-19T04:55:28,275 WARN [Thread-1000 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741877_1060 2024-11-19T04:55:28,275 WARN [Thread-1000 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK] 2024-11-19T04:55:28,277 WARN [Thread-1000 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44731 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:28,277 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_276590532_22 at /127.0.0.1:46108 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741878_1061] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data4]'}, localName='127.0.0.1:38163', datanodeUuid='85b1f523-508c-4d33-9736-eb00eb9ff733', xmitsInProgress=0}:Exception transferring block BP-39784048-172.17.0.2-1731992096997:blk_1073741878_1061 to mirror 127.0.0.1:44731 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T04:55:28,278 WARN [Thread-1000 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38163,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK], DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK]) is bad. 2024-11-19T04:55:28,278 WARN [Thread-1000 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741878_1061 2024-11-19T04:55:28,278 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_276590532_22 at /127.0.0.1:46108 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741878_1061] {}] datanode.BlockReceiver(316): Block 1073741878 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-19T04:55:28,278 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_276590532_22 at /127.0.0.1:46108 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741878_1061] {}] datanode.DataXceiver(331): 127.0.0.1:38163:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46108 dst: /127.0.0.1:38163 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:28,278 WARN [Thread-1000 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK] 2024-11-19T04:55:28,282 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:28,282 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:28,282 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:28,282 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:28,282 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:28,282 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/WALs/08a7f35e60d4,45139,1731992097985/08a7f35e60d4%2C45139%2C1731992097985.1731992098183 with entries=54, filesize=26.68 KB; new WAL /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/WALs/08a7f35e60d4,45139,1731992097985/08a7f35e60d4%2C45139%2C1731992097985.1731992128270 2024-11-19T04:55:28,283 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:28,283 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:28,283 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/WALs/08a7f35e60d4,45139,1731992097985/08a7f35e60d4%2C45139%2C1731992097985.1731992098183 2024-11-19T04:55:28,283 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43337:43337),(127.0.0.1/127.0.0.1:43321:43321)] 2024-11-19T04:55:28,283 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/WALs/08a7f35e60d4,45139,1731992097985/08a7f35e60d4%2C45139%2C1731992097985.1731992098183 is not closed yet, will try archiving it next time 2024-11-19T04:55:28,283 WARN [IPC Server handler 4 on default port 41423 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/WALs/08a7f35e60d4,45139,1731992097985/08a7f35e60d4%2C45139%2C1731992097985.1731992098183 has not been closed. Lease recovery is in progress. RecoveryId = 1063 for block blk_1073741830_1006 2024-11-19T04:55:28,284 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/WALs/08a7f35e60d4,45139,1731992097985/08a7f35e60d4%2C45139%2C1731992097985.1731992098183 after 1ms 2024-11-19T04:55:29,120 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:30,003 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:30,370 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@d68bf71 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-39784048-172.17.0.2-1731992096997:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:43353,null,null]) java.net.ConnectException: Call From 08a7f35e60d4/172.17.0.2 to localhost:39473 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-19T04:55:30,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741833_1019 (size=455) 2024-11-19T04:55:30,959 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992098471 to hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/oldWALs/08a7f35e60d4%2C38579%2C1731992098058.1731992098471 2024-11-19T04:55:30,960 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992115952 to hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/oldWALs/08a7f35e60d4%2C38579%2C1731992098058.1731992115952 2024-11-19T04:55:31,121 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
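Because the old WAL cannot be closed cleanly (its only remaining pipeline datanode is bad), the Close-WAL-Writer thread falls back to NameNode lease recovery; the "Failed to recover lease, attempt=0" line above and the attempt=1 retry a few seconds later show the cadence. Here is a hedged sketch of such a retry loop using DistributedFileSystem.recoverLease(); the file path and the 4-second pause are placeholders that only mirror what the RecoverLeaseFSUtils lines in this log show.

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Sketch of a lease-recovery retry loop against the NameNode.
public class LeaseRecoveryLoop {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path oldWal = new Path("/user/jenkins/test-data/MasterData/WALs/old-wal-file"); // placeholder
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:41423"), conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      for (int attempt = 0; attempt < 10; attempt++) {
        if (dfs.recoverLease(oldWal)) {   // true once the NameNode has released the lease
          System.out.println("lease recovered, attempt=" + attempt);
          return;
        }
        Thread.sleep(4000L);              // the log shows roughly 4s between attempts
      }
      System.out.println("giving up; lease recovery still in progress on the NameNode");
    }
  }
}
```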
2024-11-19T04:55:32,003 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:32,285 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/WALs/08a7f35e60d4,45139,1731992097985/08a7f35e60d4%2C45139%2C1731992097985.1731992098183 after 4002ms 2024-11-19T04:55:33,121 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:33,350 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@259cf0cb[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38163, datanodeUuid=85b1f523-508c-4d33-9736-eb00eb9ff733, infoPort=43321, infoSecurePort=0, ipcPort=39909, storageInfo=lv=-57;cid=testClusterID;nsid=85150078;c=1731992096997):Failed to transfer BP-39784048-172.17.0.2-1731992096997:blk_1073741833_1019 to 127.0.0.1:44731 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:34,004 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:35,121 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:35,845 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C38579%2C1731992098058.1731992135845 2024-11-19T04:55:35,848 WARN [Thread-1012 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1064 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:35,848 WARN [Thread-1012 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741880_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK], DatanodeInfoWithStorage[127.0.0.1:38163,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK]) is bad. 
2024-11-19T04:55:35,848 WARN [Thread-1012 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741880_1064 2024-11-19T04:55:35,848 WARN [Thread-1012 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK] 2024-11-19T04:55:35,851 WARN [Thread-1012 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1065 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44731 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:35,851 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_276590532_22 at /127.0.0.1:44708 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741881_1065] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data6]'}, localName='127.0.0.1:35747', datanodeUuid='25819d07-eb75-435a-ad81-b178b212ee16', xmitsInProgress=0}:Exception transferring block BP-39784048-172.17.0.2-1731992096997:blk_1073741881_1065 to mirror 127.0.0.1:44731 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:35,851 WARN [Thread-1012 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741881_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35747,DS-5eda46f5-e4b4-4b90-9fdf-bb6edd28d6df,DISK], DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK]) is bad. 
2024-11-19T04:55:35,851 WARN [Thread-1012 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741881_1065 2024-11-19T04:55:35,851 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_276590532_22 at /127.0.0.1:44708 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741881_1065] {}] datanode.BlockReceiver(316): Block 1073741881 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-19T04:55:35,851 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_276590532_22 at /127.0.0.1:44708 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741881_1065] {}] datanode.DataXceiver(331): 127.0.0.1:35747:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44708 dst: /127.0.0.1:35747 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T04:55:35,851 WARN [Thread-1012 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK] 2024-11-19T04:55:35,855 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:35,855 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:35,856 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:35,856 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:35,856 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:35,856 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992117976 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992135845 2024-11-19T04:55:35,857 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43337:43337),(127.0.0.1/127.0.0.1:43321:43321)] 2024-11-19T04:55:35,857 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.1731992117976 is not closed yet, will try archiving it next time 2024-11-19T04:55:35,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741860_1043 (size=13591) 2024-11-19T04:55:35,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38579 {}] regionserver.HRegion(8855): Flush requested on f6a2e2ef47bf948c5ce3e0e7a516d28c 2024-11-19T04:55:35,867 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f6a2e2ef47bf948c5ce3e0e7a516d28c 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-19T04:55:35,872 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/.tmp/info/e85f698db428402e8d96e13494e991de is 1080, key is row0013/info:/1731992135858/Put/seqid=0 2024-11-19T04:55:35,874 WARN [Thread-1019 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T04:55:35,874 WARN [Thread-1019 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741883_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK], DatanodeInfoWithStorage[127.0.0.1:35747,DS-5eda46f5-e4b4-4b90-9fdf-bb6edd28d6df,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK]) is bad. 2024-11-19T04:55:35,874 WARN [Thread-1019 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741883_1067 2024-11-19T04:55:35,875 WARN [Thread-1019 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK] 2024-11-19T04:55:35,878 WARN [Thread-1019 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1068 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42129 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:35,878 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:55912 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741884_1068] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data4]'}, localName='127.0.0.1:38163', datanodeUuid='85b1f523-508c-4d33-9736-eb00eb9ff733', xmitsInProgress=0}:Exception transferring block BP-39784048-172.17.0.2-1731992096997:blk_1073741884_1068 to mirror 127.0.0.1:42129 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
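Above, an RPC handler requests a flush on region f6a2e2ef47bf948c5ce3e0e7a516d28c and the MemStoreFlusher writes the memstore to a new HFile despite two abandoned blocks; the addStoredBlock lines that follow confirm the flush file landed on the two healthy datanodes. The sketch below triggers the same kind of flush explicitly through the Admin API rather than via memstore pressure; it is illustrative only, with the table name taken from the test shown in this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch only: a client-driven flush of all regions of the table.
public class ManualFlush {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
    }
  }
}
```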
2024-11-19T04:55:35,878 WARN [Thread-1019 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741884_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38163,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK], DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK]) is bad. 2024-11-19T04:55:35,878 WARN [Thread-1019 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741884_1068 2024-11-19T04:55:35,878 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:55912 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741884_1068] {}] datanode.BlockReceiver(316): Block 1073741884 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T04:55:35,879 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:55912 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741884_1068] {}] datanode.DataXceiver(331): 127.0.0.1:38163:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55912 dst: /127.0.0.1:38163 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:35,879 WARN [Thread-1019 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK] 2024-11-19T04:55:35,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741885_1069 (size=11421) 2024-11-19T04:55:35,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741885_1069 (size=11421) 2024-11-19T04:55:36,004 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-19T04:55:36,004 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:36,079 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T04:55:36,079 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-19T04:55:36,080 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T04:55:36,080 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:55:36,080 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:55:36,080 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-19T04:55:36,080 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T04:55:36,080 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1048628396, stopped=false 2024-11-19T04:55:36,080 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=08a7f35e60d4,45139,1731992097985 2024-11-19T04:55:36,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42287-0x1012e93ff3f0002, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T04:55:36,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45139-0x1012e93ff3f0000, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T04:55:36,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38579-0x1012e93ff3f0001, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T04:55:36,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45139-0x1012e93ff3f0000, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:55:36,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42287-0x1012e93ff3f0002, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:55:36,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38579-0x1012e93ff3f0001, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:55:36,082 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T04:55:36,082 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-19T04:55:36,082 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T04:55:36,083 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:55:36,083 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '08a7f35e60d4,38579,1731992098058' ***** 2024-11-19T04:55:36,083 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T04:55:36,083 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45139-0x1012e93ff3f0000, quorum=127.0.0.1:50716, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T04:55:36,083 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '08a7f35e60d4,42287,1731992099051' ***** 2024-11-19T04:55:36,083 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T04:55:36,083 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42287-0x1012e93ff3f0002, quorum=127.0.0.1:50716, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T04:55:36,083 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38579-0x1012e93ff3f0001, quorum=127.0.0.1:50716, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T04:55:36,083 INFO [RS:0;08a7f35e60d4:38579 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T04:55:36,083 INFO [RS:1;08a7f35e60d4:42287 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T04:55:36,084 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T04:55:36,084 INFO [RS:1;08a7f35e60d4:42287 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T04:55:36,084 INFO [RS:1;08a7f35e60d4:42287 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T04:55:36,084 INFO [RS:1;08a7f35e60d4:42287 {}] regionserver.HRegionServer(959): stopping server 08a7f35e60d4,42287,1731992099051 2024-11-19T04:55:36,084 INFO [RS:1;08a7f35e60d4:42287 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T04:55:36,084 INFO [RS:1;08a7f35e60d4:42287 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;08a7f35e60d4:42287. 
2024-11-19T04:55:36,084 DEBUG [RS:1;08a7f35e60d4:42287 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T04:55:36,084 DEBUG [RS:1;08a7f35e60d4:42287 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:55:36,084 INFO [RS:1;08a7f35e60d4:42287 {}] regionserver.HRegionServer(976): stopping server 08a7f35e60d4,42287,1731992099051; all regions closed. 2024-11-19T04:55:36,085 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:36,085 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:36,085 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:36,085 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:36,085 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:36,086 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:36,086 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:36,086 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 2024-11-19T04:55:36,087 WARN [IPC Server handler 4 on default port 41423 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 has not been closed. Lease recovery is in progress. RecoveryId = 1070 for block blk_1073741837_1013 2024-11-19T04:55:36,087 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 after 1ms 2024-11-19T04:55:36,137 INFO [regionserver/08a7f35e60d4:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-19T04:55:36,137 INFO [regionserver/08a7f35e60d4:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-19T04:55:36,285 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/.tmp/info/e85f698db428402e8d96e13494e991de 2024-11-19T04:55:36,292 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/.tmp/info/e85f698db428402e8d96e13494e991de as hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/info/e85f698db428402e8d96e13494e991de 2024-11-19T04:55:36,297 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/info/e85f698db428402e8d96e13494e991de, entries=6, sequenceid=55, filesize=11.2 K 2024-11-19T04:55:36,298 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=6.30 KB/6455 for f6a2e2ef47bf948c5ce3e0e7a516d28c in 431ms, sequenceid=55, compaction requested=true 2024-11-19T04:55:36,298 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f6a2e2ef47bf948c5ce3e0e7a516d28c: 2024-11-19T04:55:36,298 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.6 K, sizeToCheck=16.0 K 2024-11-19T04:55:36,298 DEBUG [MemStoreFlusher.0 {}] 
regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T04:55:36,298 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/info/7c9b3a118e8f49e593e64fe53cd3b88a because midkey is the same as first or last row 2024-11-19T04:55:36,298 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T04:55:36,298 INFO [RS:0;08a7f35e60d4:38579 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T04:55:36,299 INFO [RS:0;08a7f35e60d4:38579 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T04:55:36,299 INFO [RS:0;08a7f35e60d4:38579 {}] regionserver.HRegionServer(3091): Received CLOSE for f6a2e2ef47bf948c5ce3e0e7a516d28c 2024-11-19T04:55:36,299 INFO [RS:0;08a7f35e60d4:38579 {}] regionserver.HRegionServer(959): stopping server 08a7f35e60d4,38579,1731992098058 2024-11-19T04:55:36,299 INFO [RS:0;08a7f35e60d4:38579 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T04:55:36,299 INFO [RS:0;08a7f35e60d4:38579 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;08a7f35e60d4:38579. 2024-11-19T04:55:36,299 DEBUG [RS:0;08a7f35e60d4:38579 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T04:55:36,299 DEBUG [RS:0;08a7f35e60d4:38579 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:55:36,299 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing f6a2e2ef47bf948c5ce3e0e7a516d28c, disabling compactions & flushes 2024-11-19T04:55:36,299 INFO [RS:0;08a7f35e60d4:38579 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T04:55:36,299 INFO [RS:0;08a7f35e60d4:38579 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-11-19T04:55:36,299 INFO [RS:0;08a7f35e60d4:38579 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-19T04:55:36,299 INFO [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c. 2024-11-19T04:55:36,299 INFO [RS:0;08a7f35e60d4:38579 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T04:55:36,299 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c. 2024-11-19T04:55:36,299 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c. after waiting 0 ms 2024-11-19T04:55:36,299 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c. 2024-11-19T04:55:36,300 INFO [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing f6a2e2ef47bf948c5ce3e0e7a516d28c 1/1 column families, dataSize=6.30 KB heapSize=7 KB 2024-11-19T04:55:36,300 INFO [RS:0;08a7f35e60d4:38579 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-19T04:55:36,300 DEBUG [RS:0;08a7f35e60d4:38579 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, f6a2e2ef47bf948c5ce3e0e7a516d28c=TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c.} 2024-11-19T04:55:36,300 DEBUG [RS:0;08a7f35e60d4:38579 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, f6a2e2ef47bf948c5ce3e0e7a516d28c 2024-11-19T04:55:36,300 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T04:55:36,300 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T04:55:36,300 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T04:55:36,300 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T04:55:36,300 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T04:55:36,300 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-19T04:55:36,300 ERROR [FSHLog-0-hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7-prefix:08a7f35e60d4,38579,1731992098058.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:36,300 WARN [FSHLog-0-hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7-prefix:08a7f35e60d4,38579,1731992098058.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:36,301 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 08a7f35e60d4%2C38579%2C1731992098058.meta:.meta(num 1731992098895) roll requested 2024-11-19T04:55:36,301 INFO [regionserver/08a7f35e60d4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C38579%2C1731992098058.meta.1731992136301.meta 2024-11-19T04:55:36,303 WARN [Thread-1028 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1071 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:36,304 WARN [Thread-1028 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741886_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK], DatanodeInfoWithStorage[127.0.0.1:38163,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK]) is bad. 
2024-11-19T04:55:36,304 WARN [Thread-1028 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741886_1071 2024-11-19T04:55:36,304 WARN [Thread-1028 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK] 2024-11-19T04:55:36,305 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/.tmp/info/8c279eaee8a54b79a0fd36dbb6c26172 is 1080, key is row0018/info:/1731992135868/Put/seqid=0 2024-11-19T04:55:36,306 WARN [Thread-1029 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1073 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:36,307 WARN [Thread-1029 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741888_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK], DatanodeInfoWithStorage[127.0.0.1:38163,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK]) is bad. 2024-11-19T04:55:36,307 WARN [Thread-1029 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741888_1073 2024-11-19T04:55:36,307 WARN [Thread-1029 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK] 2024-11-19T04:55:36,309 WARN [Thread-1029 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1074 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42129 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T04:55:36,309 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:55952 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741889_1074] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data4]'}, localName='127.0.0.1:38163', datanodeUuid='85b1f523-508c-4d33-9736-eb00eb9ff733', xmitsInProgress=0}:Exception transferring block BP-39784048-172.17.0.2-1731992096997:blk_1073741889_1074 to mirror 127.0.0.1:42129 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:36,309 WARN [Thread-1029 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741889_1074 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38163,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK], DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK]) is bad. 2024-11-19T04:55:36,309 WARN [Thread-1029 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741889_1074 2024-11-19T04:55:36,310 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:55952 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741889_1074] {}] datanode.BlockReceiver(316): Block 1073741889 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T04:55:36,310 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:55952 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741889_1074] {}] datanode.DataXceiver(331): 127.0.0.1:38163:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55952 dst: /127.0.0.1:38163 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:36,310 WARN [Thread-1029 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK] 2024-11-19T04:55:36,311 WARN [Thread-1029 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1075 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:36,311 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:36,311 WARN [Thread-1029 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741890_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK], DatanodeInfoWithStorage[127.0.0.1:35747,DS-5eda46f5-e4b4-4b90-9fdf-bb6edd28d6df,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]) is bad. 
2024-11-19T04:55:36,311 WARN [Thread-1029 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741890_1075 2024-11-19T04:55:36,311 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:36,311 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:36,312 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:36,312 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:36,312 WARN [Thread-1029 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK] 2024-11-19T04:55:36,312 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992136301.meta 2024-11-19T04:55:36,312 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:36,312 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:36,312 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta 2024-11-19T04:55:36,313 WARN [IPC Server handler 1 on default port 41423 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1077 for block blk_1073741834_1010 2024-11-19T04:55:36,313 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43321:43321),(127.0.0.1/127.0.0.1:43337:43337)] 2024-11-19T04:55:36,313 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta is not closed yet, will try archiving it next time 2024-11-19T04:55:36,313 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta after 1ms 2024-11-19T04:55:36,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741891_1076 (size=11421) 2024-11-19T04:55:36,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741891_1076 (size=11421) 2024-11-19T04:55:36,316 INFO [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.30 KB at sequenceid=64 (bloomFilter=true), to=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/.tmp/info/8c279eaee8a54b79a0fd36dbb6c26172 2024-11-19T04:55:36,323 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/.tmp/info/8c279eaee8a54b79a0fd36dbb6c26172 as hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/info/8c279eaee8a54b79a0fd36dbb6c26172 2024-11-19T04:55:36,329 INFO [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/info/8c279eaee8a54b79a0fd36dbb6c26172, entries=6, sequenceid=64, filesize=11.2 K 2024-11-19T04:55:36,330 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/hbase/meta/1588230740/.tmp/info/09543a0767334714b1152d089b94330c is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c./info:regioninfo/1731992099520/Put/seqid=0 2024-11-19T04:55:36,330 INFO [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~6.30 KB/6455, heapSize ~6.98 KB/7152, currentSize=0 B/0 for f6a2e2ef47bf948c5ce3e0e7a516d28c in 31ms, sequenceid=64, compaction requested=true 2024-11-19T04:55:36,331 DEBUG 
[StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/info/3d2b4d73137c484b9701a8091390569f, hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/info/f2e562f5978948e18bb69517afdedbda, hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/info/118f4a12b35242d9a8aebcf7e684cc64] to archive 2024-11-19T04:55:36,332 WARN [Thread-1041 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1078 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:36,333 WARN [Thread-1041 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741892_1078 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK], DatanodeInfoWithStorage[127.0.0.1:38163,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK]) is bad. 2024-11-19T04:55:36,333 WARN [Thread-1041 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741892_1078 2024-11-19T04:55:36,333 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-19T04:55:36,333 WARN [Thread-1041 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK] 2024-11-19T04:55:36,334 INFO [regionserver/08a7f35e60d4:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T04:55:36,335 WARN [Thread-1041 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1079 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:36,335 WARN [Thread-1041 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741893_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK], DatanodeInfoWithStorage[127.0.0.1:38163,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK]) is bad. 2024-11-19T04:55:36,335 WARN [Thread-1041 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741893_1079 2024-11-19T04:55:36,335 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/info/3d2b4d73137c484b9701a8091390569f to hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/info/3d2b4d73137c484b9701a8091390569f 2024-11-19T04:55:36,336 WARN [Thread-1041 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK] 2024-11-19T04:55:36,337 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/info/f2e562f5978948e18bb69517afdedbda to hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/info/f2e562f5978948e18bb69517afdedbda 2024-11-19T04:55:36,339 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/info/118f4a12b35242d9a8aebcf7e684cc64 to hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/info/118f4a12b35242d9a8aebcf7e684cc64 2024-11-19T04:55:36,339 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=08a7f35e60d4:45139 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-19T04:55:36,340 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [3d2b4d73137c484b9701a8091390569f=10347, f2e562f5978948e18bb69517afdedbda=12506, 118f4a12b35242d9a8aebcf7e684cc64=6027] 2024-11-19T04:55:36,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741894_1080 (size=7089) 2024-11-19T04:55:36,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741894_1080 (size=7089) 2024-11-19T04:55:36,352 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f6a2e2ef47bf948c5ce3e0e7a516d28c/recovered.edits/67.seqid, newMaxSeqId=67, maxSeqId=1 2024-11-19T04:55:36,352 INFO [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c. 2024-11-19T04:55:36,352 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for f6a2e2ef47bf948c5ce3e0e7a516d28c: Waiting for close lock at 1731992136299Running coprocessor pre-close hooks at 1731992136299Disabling compacts and flushes for region at 1731992136299Disabling writes for close at 1731992136299Obtaining lock to block concurrent updates at 1731992136300 (+1 ms)Preparing flush snapshotting stores in f6a2e2ef47bf948c5ce3e0e7a516d28c at 1731992136300Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c., syncing WAL and waiting on mvcc, flushsize=dataSize=6455, getHeapSize=7152, getOffHeapSize=0, getCellsCount=6 at 1731992136300Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c. 
at 1731992136301 (+1 ms)Flushing f6a2e2ef47bf948c5ce3e0e7a516d28c/info: creating writer at 1731992136301Flushing f6a2e2ef47bf948c5ce3e0e7a516d28c/info: appending metadata at 1731992136304 (+3 ms)Flushing f6a2e2ef47bf948c5ce3e0e7a516d28c/info: closing flushed file at 1731992136305 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@762df67b: reopening flushed file at 1731992136322 (+17 ms)Finished flush of dataSize ~6.30 KB/6455, heapSize ~6.98 KB/7152, currentSize=0 B/0 for f6a2e2ef47bf948c5ce3e0e7a516d28c in 31ms, sequenceid=64, compaction requested=true at 1731992136331 (+9 ms)Writing region close event to WAL at 1731992136346 (+15 ms)Running coprocessor post-close hooks at 1731992136352 (+6 ms)Closed at 1731992136352 2024-11-19T04:55:36,353 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731992099159.f6a2e2ef47bf948c5ce3e0e7a516d28c. 2024-11-19T04:55:36,406 INFO [regionserver/08a7f35e60d4:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-19T04:55:36,406 INFO [regionserver/08a7f35e60d4:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-19T04:55:36,475 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@625232db[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35747, datanodeUuid=25819d07-eb75-435a-ad81-b178b212ee16, infoPort=43337, infoSecurePort=0, ipcPort=46517, storageInfo=lv=-57;cid=testClusterID;nsid=85150078;c=1731992096997):Failed to transfer BP-39784048-172.17.0.2-1731992096997:blk_1073741860_1043 to 127.0.0.1:44731 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T04:55:36,500 DEBUG [RS:0;08a7f35e60d4:38579 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-19T04:55:36,700 DEBUG [RS:0;08a7f35e60d4:38579 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-19T04:55:36,750 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/hbase/meta/1588230740/.tmp/info/09543a0767334714b1152d089b94330c 2024-11-19T04:55:36,772 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/hbase/meta/1588230740/.tmp/ns/91ba011d0af04f509d7e0522bf43e973 is 43, key is default/ns:d/1731992098943/Put/seqid=0 2024-11-19T04:55:36,774 WARN [Thread-1049 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741895_1081 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:36,774 WARN [Thread-1049 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741895_1081 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK], DatanodeInfoWithStorage[127.0.0.1:35747,DS-5eda46f5-e4b4-4b90-9fdf-bb6edd28d6df,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK]) is bad. 2024-11-19T04:55:36,774 WARN [Thread-1049 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741895_1081 2024-11-19T04:55:36,775 WARN [Thread-1049 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK] 2024-11-19T04:55:36,777 WARN [Thread-1049 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741896_1082 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44731 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:36,777 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:55976 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741896_1082] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data4]'}, localName='127.0.0.1:38163', datanodeUuid='85b1f523-508c-4d33-9736-eb00eb9ff733', xmitsInProgress=0}:Exception transferring block BP-39784048-172.17.0.2-1731992096997:blk_1073741896_1082 to mirror 127.0.0.1:44731 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:36,777 WARN [Thread-1049 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741896_1082 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38163,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK], DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK]) is bad. 2024-11-19T04:55:36,777 WARN [Thread-1049 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741896_1082 2024-11-19T04:55:36,777 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:55976 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741896_1082] {}] datanode.BlockReceiver(316): Block 1073741896 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T04:55:36,777 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:55976 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741896_1082] {}] datanode.DataXceiver(331): 127.0.0.1:38163:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55976 dst: /127.0.0.1:38163 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:36,778 WARN [Thread-1049 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK] 2024-11-19T04:55:36,780 WARN [Thread-1049 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1083 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43353 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:36,779 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:55978 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741897_1083] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data4]'}, localName='127.0.0.1:38163', datanodeUuid='85b1f523-508c-4d33-9736-eb00eb9ff733', xmitsInProgress=0}:Exception transferring block BP-39784048-172.17.0.2-1731992096997:blk_1073741897_1083 to mirror 127.0.0.1:43353 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:36,780 WARN [Thread-1049 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741897_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38163,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK], DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]) is bad. 2024-11-19T04:55:36,780 WARN [Thread-1049 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741897_1083 2024-11-19T04:55:36,780 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:55978 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741897_1083] {}] datanode.BlockReceiver(316): Block 1073741897 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T04:55:36,780 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_865809737_22 at /127.0.0.1:55978 [Receiving block BP-39784048-172.17.0.2-1731992096997:blk_1073741897_1083] {}] datanode.DataXceiver(331): 127.0.0.1:38163:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55978 dst: /127.0.0.1:38163 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T04:55:36,780 WARN [Thread-1049 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK] 2024-11-19T04:55:36,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741898_1084 (size=5153) 2024-11-19T04:55:36,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741898_1084 (size=5153) 2024-11-19T04:55:36,790 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/hbase/meta/1588230740/.tmp/ns/91ba011d0af04f509d7e0522bf43e973 2024-11-19T04:55:36,810 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/hbase/meta/1588230740/.tmp/table/b861a9ae628d4410ab17b8bec4bc4a60 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731992099530/Put/seqid=0 2024-11-19T04:55:36,812 WARN [Thread-1057 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1085 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:36,812 WARN [Thread-1057 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741899_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK], DatanodeInfoWithStorage[127.0.0.1:44731,DS-815f78d8-f80b-45d3-92d4-6298459f6366,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK]) is bad. 2024-11-19T04:55:36,812 WARN [Thread-1057 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741899_1085 2024-11-19T04:55:36,813 WARN [Thread-1057 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42129,DS-96e7db7d-603b-4c2c-a816-38776992632a,DISK] 2024-11-19T04:55:36,814 WARN [Thread-1057 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741900_1086 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:36,814 WARN [Thread-1057 {}] hdfs.DataStreamer(1731): Error Recovery for BP-39784048-172.17.0.2-1731992096997:blk_1073741900_1086 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK], DatanodeInfoWithStorage[127.0.0.1:38163,DS-937871f2-4c41-459f-abfb-7cc01ab45ce0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK]) is bad. 2024-11-19T04:55:36,814 WARN [Thread-1057 {}] hdfs.DataStreamer(1850): Abandoning BP-39784048-172.17.0.2-1731992096997:blk_1073741900_1086 2024-11-19T04:55:36,815 WARN [Thread-1057 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43353,DS-d3f45f49-9c40-40b7-9a65-1bb1482f01f9,DISK] 2024-11-19T04:55:36,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741901_1087 (size=5424) 2024-11-19T04:55:36,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741901_1087 (size=5424) 2024-11-19T04:55:36,820 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/hbase/meta/1588230740/.tmp/table/b861a9ae628d4410ab17b8bec4bc4a60 2024-11-19T04:55:36,831 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/hbase/meta/1588230740/.tmp/info/09543a0767334714b1152d089b94330c as hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/hbase/meta/1588230740/info/09543a0767334714b1152d089b94330c 2024-11-19T04:55:36,839 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/hbase/meta/1588230740/info/09543a0767334714b1152d089b94330c, entries=10, sequenceid=11, filesize=6.9 K 2024-11-19T04:55:36,840 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/hbase/meta/1588230740/.tmp/ns/91ba011d0af04f509d7e0522bf43e973 as hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/hbase/meta/1588230740/ns/91ba011d0af04f509d7e0522bf43e973 2024-11-19T04:55:36,847 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/hbase/meta/1588230740/ns/91ba011d0af04f509d7e0522bf43e973, entries=2, sequenceid=11, filesize=5.0 K 2024-11-19T04:55:36,848 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/hbase/meta/1588230740/.tmp/table/b861a9ae628d4410ab17b8bec4bc4a60 as hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/hbase/meta/1588230740/table/b861a9ae628d4410ab17b8bec4bc4a60 2024-11-19T04:55:36,856 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/hbase/meta/1588230740/table/b861a9ae628d4410ab17b8bec4bc4a60, entries=2, sequenceid=11, filesize=5.3 K 2024-11-19T04:55:36,857 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 557ms, sequenceid=11, compaction requested=false 2024-11-19T04:55:36,863 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-19T04:55:36,863 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T04:55:36,863 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T04:55:36,864 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731992136300Running coprocessor pre-close hooks at 1731992136300Disabling compacts and flushes for region at 1731992136300Disabling writes for close at 1731992136300Obtaining lock to block concurrent updates at 1731992136300Preparing flush snapshotting stores in 1588230740 at 1731992136300Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731992136300Flushing stores of hbase:meta,,1.1588230740 at 1731992136314 (+14 ms)Flushing 1588230740/info: creating writer at 1731992136314Flushing 1588230740/info: appending metadata at 1731992136330 (+16 ms)Flushing 1588230740/info: closing flushed file at 1731992136330Flushing 1588230740/ns: creating writer at 1731992136757 (+427 ms)Flushing 1588230740/ns: appending metadata at 1731992136771 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731992136771Flushing 1588230740/table: creating writer at 1731992136796 (+25 ms)Flushing 1588230740/table: appending metadata at 1731992136810 (+14 ms)Flushing 1588230740/table: closing flushed file at 1731992136810Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2e165fd2: reopening flushed file at 1731992136827 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5154710f: reopening flushed file at 1731992136839 (+12 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@527be24e: reopening flushed file at 1731992136847 (+8 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 557ms, sequenceid=11, compaction requested=false at 1731992136857 (+10 ms)Writing region close event to WAL at 1731992136859 (+2 ms)Running coprocessor post-close hooks at 1731992136863 (+4 ms)Closed at 1731992136863 2024-11-19T04:55:36,864 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-19T04:55:36,900 INFO [RS:0;08a7f35e60d4:38579 {}] regionserver.HRegionServer(976): stopping server 08a7f35e60d4,38579,1731992098058; all regions closed. 2024-11-19T04:55:36,901 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:36,901 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:36,901 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:36,901 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:36,901 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:36,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741887_1072 (size=825) 2024-11-19T04:55:36,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741887_1072 (size=825) 2024-11-19T04:55:37,118 INFO [regionserver/08a7f35e60d4:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T04:55:37,146 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-19T04:55:37,147 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T04:55:37,147 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T04:55:38,972 INFO [master/08a7f35e60d4:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-19T04:55:38,972 INFO [master/08a7f35e60d4:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-19T04:55:39,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741836_1012 (size=76) 2024-11-19T04:55:39,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741832_1008 (size=32) 2024-11-19T04:55:40,088 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 after 4002ms 2024-11-19T04:55:40,314 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta after 4002ms 2024-11-19T04:55:40,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741826_1002 (size=42) 2024-11-19T04:55:40,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741828_1004 (size=1189) 2024-11-19T04:55:40,375 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@5d31a4a7 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-39784048-172.17.0.2-1731992096997:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:43353,null,null]) java.net.ConnectException: Call From 08a7f35e60d4/172.17.0.2 to localhost:39473 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-19T04:55:41,086 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-19T04:55:41,088 DEBUG [RS:1;08a7f35e60d4:42287 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/oldWALs 2024-11-19T04:55:41,088 INFO [RS:1;08a7f35e60d4:42287 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 08a7f35e60d4%2C42287%2C1731992099051:(num 1731992099260) 2024-11-19T04:55:41,088 DEBUG [RS:1;08a7f35e60d4:42287 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:55:41,088 INFO [RS:1;08a7f35e60d4:42287 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T04:55:41,088 INFO [RS:1;08a7f35e60d4:42287 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T04:55:41,088 INFO [RS:1;08a7f35e60d4:42287 {}] hbase.ChoreService(370): Chore service for: regionserver/08a7f35e60d4:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-19T04:55:41,089 INFO [RS:1;08a7f35e60d4:42287 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T04:55:41,089 INFO [RS:1;08a7f35e60d4:42287 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T04:55:41,089 INFO [RS:1;08a7f35e60d4:42287 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-19T04:55:41,089 INFO [RS:1;08a7f35e60d4:42287 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T04:55:41,089 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T04:55:41,089 INFO [RS:1;08a7f35e60d4:42287 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42287 2024-11-19T04:55:41,091 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45139-0x1012e93ff3f0000, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T04:55:41,091 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42287-0x1012e93ff3f0002, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/08a7f35e60d4,42287,1731992099051 2024-11-19T04:55:41,091 INFO [RS:1;08a7f35e60d4:42287 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T04:55:41,092 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [08a7f35e60d4,42287,1731992099051] 2024-11-19T04:55:41,093 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/08a7f35e60d4,42287,1731992099051 already deleted, retry=false 2024-11-19T04:55:41,093 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 08a7f35e60d4,42287,1731992099051 expired; onlineServers=1 2024-11-19T04:55:41,093 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:41,193 INFO [RS:1;08a7f35e60d4:42287 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T04:55:41,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42287-0x1012e93ff3f0002, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T04:55:41,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42287-0x1012e93ff3f0002, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T04:55:41,193 INFO [RS:1;08a7f35e60d4:42287 {}] regionserver.HRegionServer(1031): Exiting; stopping=08a7f35e60d4,42287,1731992099051; zookeeper connection closed. 2024-11-19T04:55:41,193 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7cb7fc47 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7cb7fc47 2024-11-19T04:55:41,353 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:55:41,374 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:55:41,374 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:55:41,375 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:55:41,382 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:55:41,382 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:55:41,884 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T04:55:41,897 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:55:41,897 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:55:41,898 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:55:41,898 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:55:41,899 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:55:41,902 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-19T04:55:41,903 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:55:41,905 DEBUG [RS:0;08a7f35e60d4:38579 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/oldWALs 2024-11-19T04:55:41,905 INFO [RS:0;08a7f35e60d4:38579 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 08a7f35e60d4%2C38579%2C1731992098058.meta:.meta(num 1731992136301) 2024-11-19T04:55:41,905 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:41,906 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:41,906 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:41,906 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:41,906 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:41,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741882_1066 (size=14682) 2024-11-19T04:55:41,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741882_1066 (size=14682) 2024-11-19T04:55:41,915 DEBUG [RS:0;08a7f35e60d4:38579 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/oldWALs 2024-11-19T04:55:41,915 INFO [RS:0;08a7f35e60d4:38579 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 08a7f35e60d4%2C38579%2C1731992098058:(num 1731992135845) 2024-11-19T04:55:41,915 DEBUG [RS:0;08a7f35e60d4:38579 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:55:41,915 INFO [RS:0;08a7f35e60d4:38579 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T04:55:41,915 INFO [RS:0;08a7f35e60d4:38579 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T04:55:41,915 INFO [RS:0;08a7f35e60d4:38579 {}] hbase.ChoreService(370): Chore service for: regionserver/08a7f35e60d4:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-19T04:55:41,915 INFO [RS:0;08a7f35e60d4:38579 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T04:55:41,915 INFO [RS:0;08a7f35e60d4:38579 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38579 2024-11-19T04:55:41,915 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-19T04:55:41,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45139-0x1012e93ff3f0000, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T04:55:41,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38579-0x1012e93ff3f0001, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/08a7f35e60d4,38579,1731992098058 2024-11-19T04:55:41,919 INFO [RS:0;08a7f35e60d4:38579 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T04:55:41,921 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [08a7f35e60d4,38579,1731992098058] 2024-11-19T04:55:41,923 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/08a7f35e60d4,38579,1731992098058 already deleted, retry=false 2024-11-19T04:55:41,923 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 08a7f35e60d4,38579,1731992098058 expired; onlineServers=0 2024-11-19T04:55:41,923 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '08a7f35e60d4,45139,1731992097985' ***** 2024-11-19T04:55:41,923 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T04:55:41,923 INFO [M:0;08a7f35e60d4:45139 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T04:55:41,923 INFO [M:0;08a7f35e60d4:45139 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T04:55:41,923 DEBUG [M:0;08a7f35e60d4:45139 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T04:55:41,923 DEBUG [M:0;08a7f35e60d4:45139 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T04:55:41,923 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-19T04:55:41,923 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.large.0-1731992098270 {}] cleaner.HFileCleaner(306): Exit Thread[master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.large.0-1731992098270,5,FailOnTimeoutGroup] 2024-11-19T04:55:41,924 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.small.0-1731992098276 {}] cleaner.HFileCleaner(306): Exit Thread[master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.small.0-1731992098276,5,FailOnTimeoutGroup] 2024-11-19T04:55:41,924 INFO [M:0;08a7f35e60d4:45139 {}] hbase.ChoreService(370): Chore service for: master/08a7f35e60d4:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T04:55:41,924 INFO [M:0;08a7f35e60d4:45139 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T04:55:41,924 DEBUG [M:0;08a7f35e60d4:45139 {}] master.HMaster(1795): Stopping service threads 2024-11-19T04:55:41,924 INFO [M:0;08a7f35e60d4:45139 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T04:55:41,924 INFO [M:0;08a7f35e60d4:45139 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T04:55:41,924 INFO [M:0;08a7f35e60d4:45139 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T04:55:41,924 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-19T04:55:41,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45139-0x1012e93ff3f0000, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T04:55:41,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45139-0x1012e93ff3f0000, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:55:41,925 DEBUG [M:0;08a7f35e60d4:45139 {}] zookeeper.ZKUtil(347): master:45139-0x1012e93ff3f0000, quorum=127.0.0.1:50716, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T04:55:41,925 WARN [M:0;08a7f35e60d4:45139 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T04:55:41,926 INFO [M:0;08a7f35e60d4:45139 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/.lastflushedseqids 2024-11-19T04:55:41,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741902_1088 (size=130) 2024-11-19T04:55:41,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741902_1088 (size=130) 2024-11-19T04:55:41,943 INFO [M:0;08a7f35e60d4:45139 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T04:55:41,944 INFO [M:0;08a7f35e60d4:45139 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T04:55:41,944 DEBUG [M:0;08a7f35e60d4:45139 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T04:55:41,944 INFO [M:0;08a7f35e60d4:45139 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:55:41,944 DEBUG [M:0;08a7f35e60d4:45139 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:55:41,944 DEBUG [M:0;08a7f35e60d4:45139 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T04:55:41,944 DEBUG [M:0;08a7f35e60d4:45139 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T04:55:41,944 INFO [M:0;08a7f35e60d4:45139 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.26 KB heapSize=29.50 KB 2024-11-19T04:55:41,970 DEBUG [M:0;08a7f35e60d4:45139 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/aaa75284117b4d368d474979558d0d21 is 82, key is hbase:meta,,1/info:regioninfo/1731992098923/Put/seqid=0 2024-11-19T04:55:41,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741903_1089 (size=5672) 2024-11-19T04:55:41,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741903_1089 (size=5672) 2024-11-19T04:55:41,976 INFO [M:0;08a7f35e60d4:45139 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/aaa75284117b4d368d474979558d0d21 2024-11-19T04:55:41,997 DEBUG [M:0;08a7f35e60d4:45139 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/640a621db9854ff59c19c969c68e74e6 is 775, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731992099535/Put/seqid=0 2024-11-19T04:55:42,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741904_1090 (size=6256) 2024-11-19T04:55:42,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741904_1090 (size=6256) 2024-11-19T04:55:42,003 INFO [M:0;08a7f35e60d4:45139 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.59 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/640a621db9854ff59c19c969c68e74e6 2024-11-19T04:55:42,008 INFO [M:0;08a7f35e60d4:45139 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 640a621db9854ff59c19c969c68e74e6 2024-11-19T04:55:42,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38579-0x1012e93ff3f0001, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T04:55:42,021 INFO [RS:0;08a7f35e60d4:38579 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T04:55:42,021 INFO [RS:0;08a7f35e60d4:38579 {}] regionserver.HRegionServer(1031): Exiting; stopping=08a7f35e60d4,38579,1731992098058; zookeeper connection closed. 
2024-11-19T04:55:42,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38579-0x1012e93ff3f0001, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T04:55:42,022 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@457ceff3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@457ceff3 2024-11-19T04:55:42,022 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-19T04:55:42,023 DEBUG [M:0;08a7f35e60d4:45139 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3ea9c2ab134743899fb9f49e50f0d511 is 69, key is 08a7f35e60d4,38579,1731992098058/rs:state/1731992098315/Put/seqid=0 2024-11-19T04:55:42,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741905_1091 (size=5224) 2024-11-19T04:55:42,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741905_1091 (size=5224) 2024-11-19T04:55:42,029 INFO [M:0;08a7f35e60d4:45139 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3ea9c2ab134743899fb9f49e50f0d511 2024-11-19T04:55:42,054 DEBUG [M:0;08a7f35e60d4:45139 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5d1e21e4cb6946838d0971f6dc6c3f79 is 52, key is load_balancer_on/state:d/1731992098996/Put/seqid=0 2024-11-19T04:55:42,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741906_1092 (size=5056) 2024-11-19T04:55:42,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741906_1092 (size=5056) 2024-11-19T04:55:42,061 INFO [M:0;08a7f35e60d4:45139 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5d1e21e4cb6946838d0971f6dc6c3f79 2024-11-19T04:55:42,068 DEBUG [M:0;08a7f35e60d4:45139 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/aaa75284117b4d368d474979558d0d21 as hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/aaa75284117b4d368d474979558d0d21 2024-11-19T04:55:42,074 INFO [M:0;08a7f35e60d4:45139 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/aaa75284117b4d368d474979558d0d21, entries=8, sequenceid=60, 
filesize=5.5 K 2024-11-19T04:55:42,075 DEBUG [M:0;08a7f35e60d4:45139 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/640a621db9854ff59c19c969c68e74e6 as hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/640a621db9854ff59c19c969c68e74e6 2024-11-19T04:55:42,080 INFO [M:0;08a7f35e60d4:45139 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 640a621db9854ff59c19c969c68e74e6 2024-11-19T04:55:42,081 INFO [M:0;08a7f35e60d4:45139 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/640a621db9854ff59c19c969c68e74e6, entries=6, sequenceid=60, filesize=6.1 K 2024-11-19T04:55:42,082 DEBUG [M:0;08a7f35e60d4:45139 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3ea9c2ab134743899fb9f49e50f0d511 as hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3ea9c2ab134743899fb9f49e50f0d511 2024-11-19T04:55:42,087 INFO [M:0;08a7f35e60d4:45139 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3ea9c2ab134743899fb9f49e50f0d511, entries=2, sequenceid=60, filesize=5.1 K 2024-11-19T04:55:42,088 DEBUG [M:0;08a7f35e60d4:45139 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5d1e21e4cb6946838d0971f6dc6c3f79 as hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5d1e21e4cb6946838d0971f6dc6c3f79 2024-11-19T04:55:42,094 INFO [M:0;08a7f35e60d4:45139 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5d1e21e4cb6946838d0971f6dc6c3f79, entries=1, sequenceid=60, filesize=4.9 K 2024-11-19T04:55:42,094 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:42,095 INFO [M:0;08a7f35e60d4:45139 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 151ms, sequenceid=60, compaction requested=false 2024-11-19T04:55:42,097 INFO [M:0;08a7f35e60d4:45139 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:55:42,097 DEBUG [M:0;08a7f35e60d4:45139 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731992141944Disabling compacts and flushes for region at 1731992141944Disabling writes for close at 1731992141944Obtaining lock to block concurrent updates at 1731992141944Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731992141944Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23817, getHeapSize=30144, getOffHeapSize=0, getCellsCount=71 at 1731992141945 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731992141946 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731992141946Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731992141969 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731992141969Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731992141982 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731992141996 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731992141996Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731992142008 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731992142023 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731992142023Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731992142035 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731992142053 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731992142053Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@45b125fb: reopening flushed file at 1731992142067 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6ba82f12: reopening flushed file at 1731992142074 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@79d14571: reopening flushed file at 1731992142081 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@60a713b6: reopening flushed file at 1731992142088 (+7 ms)Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 151ms, sequenceid=60, compaction requested=false at 1731992142095 (+7 ms)Writing region close event to WAL at 1731992142097 (+2 ms)Closed at 1731992142097 2024-11-19T04:55:42,097 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:42,097 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:42,097 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:42,098 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:42,098 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:42,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741879_1062 (size=1045) 2024-11-19T04:55:42,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38163 is added to blk_1073741879_1062 (size=1045) 2024-11-19T04:55:42,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:42,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741835_1011 (size=393) 2024-11-19T04:55:42,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741831_1007 (size=1321) 2024-11-19T04:55:42,501 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T04:55:42,501 INFO [M:0;08a7f35e60d4:45139 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-19T04:55:42,501 INFO [M:0;08a7f35e60d4:45139 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45139 2024-11-19T04:55:42,501 INFO [M:0;08a7f35e60d4:45139 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T04:55:42,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45139-0x1012e93ff3f0000, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T04:55:42,604 INFO [M:0;08a7f35e60d4:45139 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T04:55:42,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45139-0x1012e93ff3f0000, quorum=127.0.0.1:50716, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T04:55:42,607 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2852206a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:55:42,607 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@184e2f57{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T04:55:42,607 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T04:55:42,607 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16a2580d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T04:55:42,608 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@568b1686{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/hadoop.log.dir/,STOPPED} 2024-11-19T04:55:42,609 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-19T04:55:42,609 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T04:55:42,609 WARN [BP-39784048-172.17.0.2-1731992096997 heartbeating to localhost/127.0.0.1:41423 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T04:55:42,609 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6d47403e {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-39784048-172.17.0.2-1731992096997:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:43353,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:39473 , LocalHost:localPort 08a7f35e60d4/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-19T04:55:42,609 WARN [BP-39784048-172.17.0.2-1731992096997 heartbeating to localhost/127.0.0.1:41423 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-39784048-172.17.0.2-1731992096997 (Datanode Uuid 85b1f523-508c-4d33-9736-eb00eb9ff733) service to localhost/127.0.0.1:41423 2024-11-19T04:55:42,610 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data3/current/BP-39784048-172.17.0.2-1731992096997 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:55:42,610 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data4/current/BP-39784048-172.17.0.2-1731992096997 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:55:42,610 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6d47403e {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-39784048-172.17.0.2-1731992096997:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:43353,null,null], DatanodeInfoWithStorage[127.0.0.1:38163,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-39784048-172.17.0.2-1731992096997 2024-11-19T04:55:42,610 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T04:55:42,610 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6d47403e {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-39784048-172.17.0.2-1731992096997:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:43353,null,null]) java.io.IOException: No block pool offer service for bpid=BP-39784048-172.17.0.2-1731992096997 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:42,611 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6d47403e {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-39784048-172.17.0.2-1731992096997:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:38163,null,null]) java.io.IOException: No block pool offer service for bpid=BP-39784048-172.17.0.2-1731992096997 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:42,611 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6d47403e {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-39784048-172.17.0.2-1731992096997:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:43353,null,null], DatanodeInfoWithStorage[127.0.0.1:38163,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-39784048-172.17.0.2-1731992096997:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:43353,null,null], DatanodeInfoWithStorage[127.0.0.1:38163,null,null]] 2024-11-19T04:55:42,612 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@9038e26{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:55:42,613 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@501a5826{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T04:55:42,613 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T04:55:42,613 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@36d0b5ff{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T04:55:42,613 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ff5703b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/hadoop.log.dir/,STOPPED} 2024-11-19T04:55:42,615 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T04:55:42,615 WARN [BP-39784048-172.17.0.2-1731992096997 heartbeating to localhost/127.0.0.1:41423 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T04:55:42,615 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T04:55:42,615 WARN [BP-39784048-172.17.0.2-1731992096997 heartbeating to localhost/127.0.0.1:41423 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-39784048-172.17.0.2-1731992096997 (Datanode Uuid 25819d07-eb75-435a-ad81-b178b212ee16) service to localhost/127.0.0.1:41423 2024-11-19T04:55:42,616 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data5/current/BP-39784048-172.17.0.2-1731992096997 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:55:42,616 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/cluster_0cf594c5-dee1-5e44-06f9-5ae053e65da5/data/data6/current/BP-39784048-172.17.0.2-1731992096997 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:55:42,616 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T04:55:42,622 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5519c514{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T04:55:42,622 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@47f2ada2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T04:55:42,622 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T04:55:42,623 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35c95cb4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T04:55:42,623 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@17c48ca{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/hadoop.log.dir/,STOPPED} 2024-11-19T04:55:42,631 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T04:55:42,660 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T04:55:42,669 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=157 (was 82) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41423 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41423 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 
'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f0f68bf4db8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f0f68bf4db8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41423 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:41423 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41423 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:40649 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:40649 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41423 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41423 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:41423 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41423 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41423 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41423 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 404) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=166 (was 177), ProcessCount=11 (was 11), AvailableMemoryMB=12137 (was 12185) 2024-11-19T04:55:42,676 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=157, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=166, ProcessCount=11, AvailableMemoryMB=12138 2024-11-19T04:55:42,676 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T04:55:42,676 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/hadoop.log.dir so I do NOT create it in target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6 2024-11-19T04:55:42,676 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6f335e0-2f82-e6f0-307b-b2d747479e50/hadoop.tmp.dir so I do NOT create it in target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6 2024-11-19T04:55:42,676 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/cluster_14c23cca-1b11-3138-a135-f4ba00ba23c7, deleteOnExit=true 2024-11-19T04:55:42,676 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T04:55:42,677 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/test.cache.data in system properties and HBase conf 2024-11-19T04:55:42,677 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T04:55:42,677 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/hadoop.log.dir in system properties and HBase conf 2024-11-19T04:55:42,677 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T04:55:42,677 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T04:55:42,677 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T04:55:42,677 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-19T04:55:42,677 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T04:55:42,678 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T04:55:42,678 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T04:55:42,678 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T04:55:42,678 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T04:55:42,678 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T04:55:42,678 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T04:55:42,678 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T04:55:42,678 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T04:55:42,678 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/nfs.dump.dir in system properties and HBase conf 2024-11-19T04:55:42,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/java.io.tmpdir in system properties and HBase conf 2024-11-19T04:55:42,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T04:55:42,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T04:55:42,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T04:55:42,693 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T04:55:42,780 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:55:42,785 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T04:55:42,786 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T04:55:42,786 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T04:55:42,786 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T04:55:42,787 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:55:42,787 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7bd03e52{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/hadoop.log.dir/,AVAILABLE} 2024-11-19T04:55:42,788 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@27a49013{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T04:55:42,917 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1915705e{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/java.io.tmpdir/jetty-localhost-43119-hadoop-hdfs-3_4_1-tests_jar-_-any-17504828143503160422/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T04:55:42,918 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@17b2a9ba{HTTP/1.1, (http/1.1)}{localhost:43119} 2024-11-19T04:55:42,918 INFO [Time-limited test {}] server.Server(415): Started @150962ms 2024-11-19T04:55:42,931 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T04:55:43,009 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:55:43,012 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T04:55:43,014 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T04:55:43,014 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T04:55:43,014 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T04:55:43,015 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25f63c50{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/hadoop.log.dir/,AVAILABLE} 2024-11-19T04:55:43,016 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@165d0fad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T04:55:43,095 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] 
... 11 more 2024-11-19T04:55:43,133 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6f0827f7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/java.io.tmpdir/jetty-localhost-44439-hadoop-hdfs-3_4_1-tests_jar-_-any-12341226440501500806/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:55:43,134 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2744dc92{HTTP/1.1, (http/1.1)}{localhost:44439} 2024-11-19T04:55:43,135 INFO [Time-limited test {}] server.Server(415): Started @151179ms 2024-11-19T04:55:43,136 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T04:55:43,171 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:55:43,174 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T04:55:43,175 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T04:55:43,175 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T04:55:43,175 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T04:55:43,176 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11255fea{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/hadoop.log.dir/,AVAILABLE} 2024-11-19T04:55:43,176 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3fd7563{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T04:55:43,265 WARN [Thread-1180 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/cluster_14c23cca-1b11-3138-a135-f4ba00ba23c7/data/data2/current/BP-1239689047-172.17.0.2-1731992142713/current, will proceed with Du for space computation calculation, 2024-11-19T04:55:43,265 WARN [Thread-1179 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/cluster_14c23cca-1b11-3138-a135-f4ba00ba23c7/data/data1/current/BP-1239689047-172.17.0.2-1731992142713/current, will proceed with Du for space computation calculation, 2024-11-19T04:55:43,284 WARN [Thread-1158 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T04:55:43,287 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4d8f6a7796cb5eb7 with lease ID 0xe05a4096d0ba6cf0: Processing first storage report for DS-5f22df2c-221c-448e-9d00-366dafd2a73a from datanode DatanodeRegistration(127.0.0.1:43811, datanodeUuid=2717070f-ca22-4f46-80fb-eee7927e4ef3, infoPort=40747, infoSecurePort=0, ipcPort=38839, storageInfo=lv=-57;cid=testClusterID;nsid=1235982065;c=1731992142713) 2024-11-19T04:55:43,287 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4d8f6a7796cb5eb7 with lease ID 0xe05a4096d0ba6cf0: from storage DS-5f22df2c-221c-448e-9d00-366dafd2a73a node DatanodeRegistration(127.0.0.1:43811, datanodeUuid=2717070f-ca22-4f46-80fb-eee7927e4ef3, infoPort=40747, infoSecurePort=0, ipcPort=38839, storageInfo=lv=-57;cid=testClusterID;nsid=1235982065;c=1731992142713), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:55:43,287 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4d8f6a7796cb5eb7 with lease ID 0xe05a4096d0ba6cf0: Processing first storage report for DS-28d48564-144d-4db4-9424-0117ed7ff28f from datanode DatanodeRegistration(127.0.0.1:43811, datanodeUuid=2717070f-ca22-4f46-80fb-eee7927e4ef3, infoPort=40747, infoSecurePort=0, ipcPort=38839, storageInfo=lv=-57;cid=testClusterID;nsid=1235982065;c=1731992142713) 2024-11-19T04:55:43,287 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4d8f6a7796cb5eb7 with lease ID 0xe05a4096d0ba6cf0: from storage DS-28d48564-144d-4db4-9424-0117ed7ff28f node DatanodeRegistration(127.0.0.1:43811, datanodeUuid=2717070f-ca22-4f46-80fb-eee7927e4ef3, infoPort=40747, infoSecurePort=0, ipcPort=38839, storageInfo=lv=-57;cid=testClusterID;nsid=1235982065;c=1731992142713), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:55:43,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:43,316 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@140caf6f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/java.io.tmpdir/jetty-localhost-45957-hadoop-hdfs-3_4_1-tests_jar-_-any-7327619852935354848/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:55:43,317 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1da660ce{HTTP/1.1, (http/1.1)}{localhost:45957} 2024-11-19T04:55:43,317 INFO [Time-limited test {}] server.Server(415): Started @151361ms 2024-11-19T04:55:43,318 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T04:55:43,418 WARN [Thread-1205 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/cluster_14c23cca-1b11-3138-a135-f4ba00ba23c7/data/data3/current/BP-1239689047-172.17.0.2-1731992142713/current, will proceed with Du for space computation calculation, 2024-11-19T04:55:43,418 WARN [Thread-1206 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/cluster_14c23cca-1b11-3138-a135-f4ba00ba23c7/data/data4/current/BP-1239689047-172.17.0.2-1731992142713/current, will proceed with Du for space computation calculation, 2024-11-19T04:55:43,442 WARN [Thread-1194 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T04:55:43,445 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xeea0ff9466877b50 with lease ID 0xe05a4096d0ba6cf1: Processing first storage report for DS-94073cf7-b1ad-41b2-b652-74411d71ed62 from datanode DatanodeRegistration(127.0.0.1:44257, datanodeUuid=498e3122-0852-427c-9e8a-ef967a03db89, infoPort=40955, infoSecurePort=0, ipcPort=42511, storageInfo=lv=-57;cid=testClusterID;nsid=1235982065;c=1731992142713) 2024-11-19T04:55:43,445 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeea0ff9466877b50 with lease ID 0xe05a4096d0ba6cf1: from storage DS-94073cf7-b1ad-41b2-b652-74411d71ed62 node DatanodeRegistration(127.0.0.1:44257, datanodeUuid=498e3122-0852-427c-9e8a-ef967a03db89, infoPort=40955, infoSecurePort=0, ipcPort=42511, storageInfo=lv=-57;cid=testClusterID;nsid=1235982065;c=1731992142713), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:55:43,445 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xeea0ff9466877b50 with lease ID 0xe05a4096d0ba6cf1: Processing first storage report for DS-b9217c57-a20b-456b-ac08-50c1f9c49230 from datanode DatanodeRegistration(127.0.0.1:44257, datanodeUuid=498e3122-0852-427c-9e8a-ef967a03db89, infoPort=40955, infoSecurePort=0, ipcPort=42511, storageInfo=lv=-57;cid=testClusterID;nsid=1235982065;c=1731992142713) 2024-11-19T04:55:43,445 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeea0ff9466877b50 with lease ID 0xe05a4096d0ba6cf1: from storage DS-b9217c57-a20b-456b-ac08-50c1f9c49230 node DatanodeRegistration(127.0.0.1:44257, datanodeUuid=498e3122-0852-427c-9e8a-ef967a03db89, infoPort=40955, infoSecurePort=0, ipcPort=42511, storageInfo=lv=-57;cid=testClusterID;nsid=1235982065;c=1731992142713), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:55:43,546 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6 2024-11-19T04:55:43,549 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/cluster_14c23cca-1b11-3138-a135-f4ba00ba23c7/zookeeper_0, clientPort=52010, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/cluster_14c23cca-1b11-3138-a135-f4ba00ba23c7/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/cluster_14c23cca-1b11-3138-a135-f4ba00ba23c7/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T04:55:43,550 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52010 2024-11-19T04:55:43,550 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:55:43,551 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:55:43,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741825_1001 (size=7) 2024-11-19T04:55:43,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44257 is added to blk_1073741825_1001 (size=7) 2024-11-19T04:55:43,561 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808 with version=8 2024-11-19T04:55:43,561 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/hbase-staging 2024-11-19T04:55:43,564 INFO [Time-limited test {}] client.ConnectionUtils(128): master/08a7f35e60d4:0 server-side Connection retries=45 2024-11-19T04:55:43,564 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T04:55:43,564 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T04:55:43,564 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T04:55:43,564 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T04:55:43,564 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T04:55:43,564 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T04:55:43,564 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T04:55:43,565 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42521 2024-11-19T04:55:43,566 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42521 connecting to ZooKeeper ensemble=127.0.0.1:52010 2024-11-19T04:55:43,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:425210x0, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T04:55:43,572 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42521-0x1012e94b14c0000 connected 2024-11-19T04:55:43,588 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:55:43,590 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:55:43,592 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42521-0x1012e94b14c0000, quorum=127.0.0.1:52010, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T04:55:43,592 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808, hbase.cluster.distributed=false 2024-11-19T04:55:43,594 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42521-0x1012e94b14c0000, quorum=127.0.0.1:52010, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T04:55:43,594 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42521 2024-11-19T04:55:43,595 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42521 2024-11-19T04:55:43,595 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42521 2024-11-19T04:55:43,596 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42521 2024-11-19T04:55:43,596 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42521 2024-11-19T04:55:43,612 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/08a7f35e60d4:0 server-side Connection retries=45 2024-11-19T04:55:43,612 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T04:55:43,612 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T04:55:43,612 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T04:55:43,612 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T04:55:43,612 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T04:55:43,612 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T04:55:43,612 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T04:55:43,613 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45575 2024-11-19T04:55:43,614 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45575 connecting to ZooKeeper ensemble=127.0.0.1:52010 2024-11-19T04:55:43,615 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:55:43,616 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:55:43,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:455750x0, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T04:55:43,621 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:455750x0, quorum=127.0.0.1:52010, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T04:55:43,621 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45575-0x1012e94b14c0001 connected 2024-11-19T04:55:43,621 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T04:55:43,622 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T04:55:43,622 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45575-0x1012e94b14c0001, quorum=127.0.0.1:52010, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T04:55:43,623 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45575-0x1012e94b14c0001, quorum=127.0.0.1:52010, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T04:55:43,624 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45575 2024-11-19T04:55:43,624 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45575 2024-11-19T04:55:43,628 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45575 2024-11-19T04:55:43,629 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45575 2024-11-19T04:55:43,629 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45575 2024-11-19T04:55:43,643 DEBUG [M:0;08a7f35e60d4:42521 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;08a7f35e60d4:42521 2024-11-19T04:55:43,643 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/08a7f35e60d4,42521,1731992143563 2024-11-19T04:55:43,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45575-0x1012e94b14c0001, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T04:55:43,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42521-0x1012e94b14c0000, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T04:55:43,646 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42521-0x1012e94b14c0000, quorum=127.0.0.1:52010, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/08a7f35e60d4,42521,1731992143563 2024-11-19T04:55:43,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45575-0x1012e94b14c0001, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T04:55:43,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42521-0x1012e94b14c0000, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:55:43,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45575-0x1012e94b14c0001, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:55:43,648 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42521-0x1012e94b14c0000, quorum=127.0.0.1:52010, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T04:55:43,648 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/08a7f35e60d4,42521,1731992143563 from backup master directory 2024-11-19T04:55:43,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42521-0x1012e94b14c0000, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/08a7f35e60d4,42521,1731992143563 2024-11-19T04:55:43,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45575-0x1012e94b14c0001, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T04:55:43,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42521-0x1012e94b14c0000, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T04:55:43,650 WARN [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-19T04:55:43,650 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=08a7f35e60d4,42521,1731992143563 2024-11-19T04:55:43,655 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/hbase.id] with ID: cd951867-a211-43d5-b66b-1dee431193e8 2024-11-19T04:55:43,655 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/.tmp/hbase.id 2024-11-19T04:55:43,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44257 is added to blk_1073741826_1002 (size=42) 2024-11-19T04:55:43,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741826_1002 (size=42) 2024-11-19T04:55:43,661 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/.tmp/hbase.id]:[hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/hbase.id] 2024-11-19T04:55:43,673 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:55:43,673 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-19T04:55:43,674 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-19T04:55:43,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42521-0x1012e94b14c0000, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:55:43,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45575-0x1012e94b14c0001, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:55:43,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741827_1003 (size=196) 2024-11-19T04:55:43,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44257 is added to blk_1073741827_1003 (size=196) 2024-11-19T04:55:43,682 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T04:55:43,683 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T04:55:43,683 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T04:55:43,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741828_1004 (size=1189) 2024-11-19T04:55:43,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44257 is added to blk_1073741828_1004 (size=1189) 2024-11-19T04:55:43,692 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/data/master/store 2024-11-19T04:55:43,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741829_1005 (size=34) 2024-11-19T04:55:43,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44257 is added to blk_1073741829_1005 (size=34) 2024-11-19T04:55:43,698 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:55:43,698 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T04:55:43,698 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:55:43,698 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:55:43,698 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T04:55:43,698 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:55:43,698 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T04:55:43,699 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731992143698Disabling compacts and flushes for region at 1731992143698Disabling writes for close at 1731992143698Writing region close event to WAL at 1731992143698Closed at 1731992143698 2024-11-19T04:55:43,699 WARN [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/data/master/store/.initializing 2024-11-19T04:55:43,699 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/WALs/08a7f35e60d4,42521,1731992143563 2024-11-19T04:55:43,702 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=08a7f35e60d4%2C42521%2C1731992143563, suffix=, logDir=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/WALs/08a7f35e60d4,42521,1731992143563, archiveDir=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/oldWALs, maxLogs=10 2024-11-19T04:55:43,702 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C42521%2C1731992143563.1731992143702 2024-11-19T04:55:43,707 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/WALs/08a7f35e60d4,42521,1731992143563/08a7f35e60d4%2C42521%2C1731992143563.1731992143702 2024-11-19T04:55:43,707 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40747:40747),(127.0.0.1/127.0.0.1:40955:40955)] 2024-11-19T04:55:43,708 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T04:55:43,708 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:55:43,708 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:55:43,708 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:55:43,710 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:55:43,711 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T04:55:43,711 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:55:43,711 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:55:43,712 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:55:43,713 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T04:55:43,713 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:55:43,713 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T04:55:43,713 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:55:43,714 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T04:55:43,715 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:55:43,715 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T04:55:43,715 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:55:43,716 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T04:55:43,716 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:55:43,717 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T04:55:43,717 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:55:43,717 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:55:43,718 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:55:43,719 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:55:43,719 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:55:43,720 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T04:55:43,721 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:55:43,723 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T04:55:43,723 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=813362, jitterRate=0.03424437344074249}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T04:55:43,724 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731992143708Initializing all the Stores at 1731992143709 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992143709Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992143709Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992143709Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992143709Cleaning up temporary data from old regions at 1731992143719 (+10 ms)Region opened successfully at 1731992143724 (+5 ms) 2024-11-19T04:55:43,724 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T04:55:43,727 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9becda4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=08a7f35e60d4/172.17.0.2:0 2024-11-19T04:55:43,728 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T04:55:43,728 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T04:55:43,728 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T04:55:43,728 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T04:55:43,729 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-19T04:55:43,729 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-19T04:55:43,729 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T04:55:43,731 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T04:55:43,732 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42521-0x1012e94b14c0000, quorum=127.0.0.1:52010, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T04:55:43,733 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T04:55:43,733 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T04:55:43,734 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42521-0x1012e94b14c0000, quorum=127.0.0.1:52010, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T04:55:43,735 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T04:55:43,735 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T04:55:43,736 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42521-0x1012e94b14c0000, quorum=127.0.0.1:52010, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T04:55:43,738 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T04:55:43,739 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42521-0x1012e94b14c0000, quorum=127.0.0.1:52010, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T04:55:43,740 DEBUG 
[master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T04:55:43,742 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42521-0x1012e94b14c0000, quorum=127.0.0.1:52010, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T04:55:43,743 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T04:55:43,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45575-0x1012e94b14c0001, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T04:55:43,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42521-0x1012e94b14c0000, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T04:55:43,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42521-0x1012e94b14c0000, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:55:43,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45575-0x1012e94b14c0001, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:55:43,746 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=08a7f35e60d4,42521,1731992143563, sessionid=0x1012e94b14c0000, setting cluster-up flag (Was=false) 2024-11-19T04:55:43,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42521-0x1012e94b14c0000, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:55:43,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45575-0x1012e94b14c0001, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:55:43,757 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T04:55:43,758 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=08a7f35e60d4,42521,1731992143563 2024-11-19T04:55:43,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45575-0x1012e94b14c0001, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:55:43,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42521-0x1012e94b14c0000, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:55:43,767 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T04:55:43,769 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=08a7f35e60d4,42521,1731992143563 2024-11-19T04:55:43,770 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T04:55:43,772 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T04:55:43,772 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T04:55:43,772 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-19T04:55:43,773 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 08a7f35e60d4,42521,1731992143563 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T04:55:43,774 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/08a7f35e60d4:0, corePoolSize=5, maxPoolSize=5 2024-11-19T04:55:43,774 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/08a7f35e60d4:0, corePoolSize=5, maxPoolSize=5 2024-11-19T04:55:43,774 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=5, maxPoolSize=5 2024-11-19T04:55:43,774 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=5, maxPoolSize=5 2024-11-19T04:55:43,774 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/08a7f35e60d4:0, corePoolSize=10, maxPoolSize=10 2024-11-19T04:55:43,774 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:55:43,774 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=2, maxPoolSize=2 2024-11-19T04:55:43,774 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=1, 
maxPoolSize=1 2024-11-19T04:55:43,776 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T04:55:43,776 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T04:55:43,777 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:55:43,777 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T04:55:43,778 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731992173778 2024-11-19T04:55:43,778 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T04:55:43,778 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T04:55:43,779 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T04:55:43,779 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T04:55:43,779 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T04:55:43,779 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T04:55:43,779 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T04:55:43,779 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T04:55:43,779 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T04:55:43,779 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T04:55:43,780 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T04:55:43,780 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T04:55:43,780 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.large.0-1731992143780,5,FailOnTimeoutGroup] 2024-11-19T04:55:43,784 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.small.0-1731992143780,5,FailOnTimeoutGroup] 2024-11-19T04:55:43,784 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T04:55:43,784 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T04:55:43,784 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T04:55:43,784 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
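Several of the ChoreService(168) entries above register ScheduledChore instances (LogsCleaner, HFileCleaner, ReplicationBarrierCleaner, SnapshotCleaner), each with a name, period and time unit. A minimal sketch of that pattern, assuming only the public ScheduledChore/ChoreService classes; the chore name and body here are made up for illustration:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    // Hypothetical chore illustrating the name/period pattern reported above.
    class ExampleCleanerChore extends ScheduledChore {
      ExampleCleanerChore(Stoppable stopper, int periodMillis) {
        super("ExampleCleanerChore", stopper, periodMillis);
      }

      @Override
      protected void chore() {
        // periodic work goes here; the ChoreService invokes this every periodMillis milliseconds
      }
    }

    // Scheduling it (stopper would normally be the hosting master or region server):
    //   ChoreService choreService = new ChoreService("example");
    //   choreService.scheduleChore(new ExampleCleanerChore(stopper, 600_000));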
2024-11-19T04:55:43,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44257 is added to blk_1073741831_1007 (size=1321) 2024-11-19T04:55:43,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741831_1007 (size=1321) 2024-11-19T04:55:43,790 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T04:55:43,790 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808 2024-11-19T04:55:43,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741832_1008 (size=32) 2024-11-19T04:55:43,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44257 is added to blk_1073741832_1008 (size=32) 2024-11-19T04:55:43,798 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:55:43,800 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T04:55:43,801 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T04:55:43,801 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:55:43,802 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:55:43,802 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T04:55:43,803 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T04:55:43,803 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:55:43,803 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:55:43,804 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T04:55:43,805 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T04:55:43,805 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:55:43,805 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:55:43,805 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T04:55:43,806 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T04:55:43,806 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:55:43,807 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:55:43,807 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T04:55:43,808 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/data/hbase/meta/1588230740 2024-11-19T04:55:43,808 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/data/hbase/meta/1588230740 2024-11-19T04:55:43,809 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T04:55:43,809 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T04:55:43,810 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
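The CompactionConfiguration(183) and FlushLargeStoresPolicy(65) entries above report the effective compaction parameters (min/max files, ratios, major compaction period and jitter) and the fallback for the per-column-family flush lower bound. A hedged sketch of the configuration keys that feed those values; the key names are standard HBase settings (one of them is named literally in the log), and the values simply echo the numbers logged here:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionFlushTuningSketch {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Values matching the CompactionConfiguration line above (these are also the usual defaults)
        conf.setInt("hbase.hstore.compaction.min", 3);              // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);             // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);       // ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);  // major period, 7 days in ms
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        // When this key is unset, FlushLargeStoresPolicy falls back to the memstore flush size
        // divided by the number of families, exactly as the log entry above reports
        conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 16L * 1024 * 1024);
        return conf;
      }
    }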
2024-11-19T04:55:43,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T04:55:43,813 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T04:55:43,814 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=859182, jitterRate=0.09250660240650177}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T04:55:43,814 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731992143798Initializing all the Stores at 1731992143799 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992143799Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992143800 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992143800Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992143800Cleaning up temporary data from old regions at 1731992143809 (+9 ms)Region opened successfully at 1731992143814 (+5 ms) 2024-11-19T04:55:43,815 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T04:55:43,815 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T04:55:43,815 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T04:55:43,815 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T04:55:43,815 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T04:55:43,815 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T04:55:43,815 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731992143815Disabling compacts and flushes for region at 1731992143815Disabling writes for close at 1731992143815Writing 
region close event to WAL at 1731992143815Closed at 1731992143815 2024-11-19T04:55:43,817 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T04:55:43,817 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T04:55:43,817 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T04:55:43,819 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T04:55:43,820 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T04:55:43,832 INFO [RS:0;08a7f35e60d4:45575 {}] regionserver.HRegionServer(746): ClusterId : cd951867-a211-43d5-b66b-1dee431193e8 2024-11-19T04:55:43,832 DEBUG [RS:0;08a7f35e60d4:45575 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T04:55:43,834 DEBUG [RS:0;08a7f35e60d4:45575 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T04:55:43,834 DEBUG [RS:0;08a7f35e60d4:45575 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T04:55:43,836 DEBUG [RS:0;08a7f35e60d4:45575 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T04:55:43,836 DEBUG [RS:0;08a7f35e60d4:45575 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a24908f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=08a7f35e60d4/172.17.0.2:0 2024-11-19T04:55:43,849 DEBUG [RS:0;08a7f35e60d4:45575 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;08a7f35e60d4:45575 2024-11-19T04:55:43,849 INFO [RS:0;08a7f35e60d4:45575 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T04:55:43,849 INFO [RS:0;08a7f35e60d4:45575 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T04:55:43,849 DEBUG [RS:0;08a7f35e60d4:45575 {}] regionserver.HRegionServer(832): About to register with Master. 
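The RegionServerCoprocessorHost(66/67) entries above state that system and table coprocessor loading are enabled. Those two switches are ordinary configuration flags; a small sketch, with the boolean values chosen purely for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CoprocessorLoadingFlagsSketch {
      public static Configuration flags() {
        Configuration conf = HBaseConfiguration.create();
        // Cluster-wide (system) coprocessor loading, reported enabled above
        conf.setBoolean("hbase.coprocessor.enabled", true);
        // Per-table (user) coprocessor loading, also reported enabled above
        conf.setBoolean("hbase.coprocessor.user.enabled", true);
        return conf;
      }
    }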
2024-11-19T04:55:43,850 INFO [RS:0;08a7f35e60d4:45575 {}] regionserver.HRegionServer(2659): reportForDuty to master=08a7f35e60d4,42521,1731992143563 with port=45575, startcode=1731992143611 2024-11-19T04:55:43,850 DEBUG [RS:0;08a7f35e60d4:45575 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T04:55:43,852 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52949, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T04:55:43,853 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42521 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 08a7f35e60d4,45575,1731992143611 2024-11-19T04:55:43,853 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42521 {}] master.ServerManager(517): Registering regionserver=08a7f35e60d4,45575,1731992143611 2024-11-19T04:55:43,855 DEBUG [RS:0;08a7f35e60d4:45575 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808 2024-11-19T04:55:43,855 DEBUG [RS:0;08a7f35e60d4:45575 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42601 2024-11-19T04:55:43,855 DEBUG [RS:0;08a7f35e60d4:45575 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T04:55:43,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42521-0x1012e94b14c0000, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T04:55:43,858 DEBUG [RS:0;08a7f35e60d4:45575 {}] zookeeper.ZKUtil(111): regionserver:45575-0x1012e94b14c0001, quorum=127.0.0.1:52010, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/08a7f35e60d4,45575,1731992143611 2024-11-19T04:55:43,858 WARN [RS:0;08a7f35e60d4:45575 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T04:55:43,858 INFO [RS:0;08a7f35e60d4:45575 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T04:55:43,858 DEBUG [RS:0;08a7f35e60d4:45575 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611 2024-11-19T04:55:43,858 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [08a7f35e60d4,45575,1731992143611] 2024-11-19T04:55:43,861 INFO [RS:0;08a7f35e60d4:45575 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T04:55:43,863 INFO [RS:0;08a7f35e60d4:45575 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T04:55:43,864 INFO [RS:0;08a7f35e60d4:45575 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T04:55:43,864 INFO [RS:0;08a7f35e60d4:45575 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
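The MemStoreFlusher(131) and PressureAwareCompactionThroughputController(131) entries above show the global memstore limit (880 M, with a low-water mark of 836 M, i.e. 95% of the limit) and the compaction throughput bounds (50-100 MB/s). A sketch of the knobs that commonly drive these numbers; the key names are my reading of the standard settings and are not taken from this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreAndThroughputSketch {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Global memstore limit as a fraction of heap; the low-water mark is a fraction of that limit
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        // Compaction throughput bounds (bytes/sec) matching the 100 MB/50 MB figures logged above
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        return conf;
      }
    }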
2024-11-19T04:55:43,865 INFO [RS:0;08a7f35e60d4:45575 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T04:55:43,865 INFO [RS:0;08a7f35e60d4:45575 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T04:55:43,865 INFO [RS:0;08a7f35e60d4:45575 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T04:55:43,866 DEBUG [RS:0;08a7f35e60d4:45575 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:55:43,866 DEBUG [RS:0;08a7f35e60d4:45575 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:55:43,866 DEBUG [RS:0;08a7f35e60d4:45575 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:55:43,866 DEBUG [RS:0;08a7f35e60d4:45575 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:55:43,866 DEBUG [RS:0;08a7f35e60d4:45575 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:55:43,866 DEBUG [RS:0;08a7f35e60d4:45575 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/08a7f35e60d4:0, corePoolSize=2, maxPoolSize=2 2024-11-19T04:55:43,866 DEBUG [RS:0;08a7f35e60d4:45575 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:55:43,866 DEBUG [RS:0;08a7f35e60d4:45575 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:55:43,866 DEBUG [RS:0;08a7f35e60d4:45575 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:55:43,866 DEBUG [RS:0;08a7f35e60d4:45575 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:55:43,866 DEBUG [RS:0;08a7f35e60d4:45575 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:55:43,866 DEBUG [RS:0;08a7f35e60d4:45575 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:55:43,866 DEBUG [RS:0;08a7f35e60d4:45575 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/08a7f35e60d4:0, corePoolSize=3, maxPoolSize=3 2024-11-19T04:55:43,866 DEBUG [RS:0;08a7f35e60d4:45575 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0, corePoolSize=3, maxPoolSize=3 2024-11-19T04:55:43,867 INFO [RS:0;08a7f35e60d4:45575 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
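The ExecutorService(95) entries above start one named thread pool per operation type on the region server, each with its own corePoolSize/maxPoolSize (and, for the master's dispatcher pool earlier, allowCoreThreadTimeOut=true). The same pattern in plain JDK terms, purely to illustrate what those parameters mean; this is not the HBase executor implementation itself:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class OperationPoolSketch {
      public static ThreadPoolExecutor newPool(int corePoolSize, int maxPoolSize) {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            corePoolSize, maxPoolSize,
            60L, TimeUnit.SECONDS,               // idle threads above the core size die after 60 s
            new LinkedBlockingQueue<Runnable>());
        // Mirrors the "allowCoreThreadTimeOut=true" noted for the master's dispatcher pool
        pool.allowCoreThreadTimeOut(true);
        return pool;
      }
    }

    // e.g. the RS_OPEN_REGION pool logged above uses corePoolSize=1, maxPoolSize=1:
    //   ThreadPoolExecutor openRegionPool = OperationPoolSketch.newPool(1, 1);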
2024-11-19T04:55:43,867 INFO [RS:0;08a7f35e60d4:45575 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T04:55:43,867 INFO [RS:0;08a7f35e60d4:45575 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T04:55:43,867 INFO [RS:0;08a7f35e60d4:45575 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T04:55:43,867 INFO [RS:0;08a7f35e60d4:45575 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T04:55:43,867 INFO [RS:0;08a7f35e60d4:45575 {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,45575,1731992143611-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T04:55:43,882 INFO [RS:0;08a7f35e60d4:45575 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T04:55:43,882 INFO [RS:0;08a7f35e60d4:45575 {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,45575,1731992143611-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T04:55:43,883 INFO [RS:0;08a7f35e60d4:45575 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:55:43,883 INFO [RS:0;08a7f35e60d4:45575 {}] regionserver.Replication(171): 08a7f35e60d4,45575,1731992143611 started 2024-11-19T04:55:43,897 INFO [RS:0;08a7f35e60d4:45575 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:55:43,897 INFO [RS:0;08a7f35e60d4:45575 {}] regionserver.HRegionServer(1482): Serving as 08a7f35e60d4,45575,1731992143611, RpcServer on 08a7f35e60d4/172.17.0.2:45575, sessionid=0x1012e94b14c0001 2024-11-19T04:55:43,897 DEBUG [RS:0;08a7f35e60d4:45575 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T04:55:43,897 DEBUG [RS:0;08a7f35e60d4:45575 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 08a7f35e60d4,45575,1731992143611 2024-11-19T04:55:43,897 DEBUG [RS:0;08a7f35e60d4:45575 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '08a7f35e60d4,45575,1731992143611' 2024-11-19T04:55:43,897 DEBUG [RS:0;08a7f35e60d4:45575 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T04:55:43,897 DEBUG [RS:0;08a7f35e60d4:45575 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T04:55:43,898 DEBUG [RS:0;08a7f35e60d4:45575 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T04:55:43,898 DEBUG [RS:0;08a7f35e60d4:45575 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T04:55:43,898 DEBUG [RS:0;08a7f35e60d4:45575 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 08a7f35e60d4,45575,1731992143611 2024-11-19T04:55:43,898 DEBUG [RS:0;08a7f35e60d4:45575 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '08a7f35e60d4,45575,1731992143611' 2024-11-19T04:55:43,898 DEBUG [RS:0;08a7f35e60d4:45575 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T04:55:43,898 DEBUG 
[RS:0;08a7f35e60d4:45575 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T04:55:43,898 DEBUG [RS:0;08a7f35e60d4:45575 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T04:55:43,899 INFO [RS:0;08a7f35e60d4:45575 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T04:55:43,899 INFO [RS:0;08a7f35e60d4:45575 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T04:55:43,970 WARN [08a7f35e60d4:42521 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-19T04:55:44,001 INFO [RS:0;08a7f35e60d4:45575 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=08a7f35e60d4%2C45575%2C1731992143611, suffix=, logDir=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611, archiveDir=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/oldWALs, maxLogs=32 2024-11-19T04:55:44,002 INFO [RS:0;08a7f35e60d4:45575 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C45575%2C1731992143611.1731992144002 2024-11-19T04:55:44,008 INFO [RS:0;08a7f35e60d4:45575 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992144002 2024-11-19T04:55:44,009 DEBUG [RS:0;08a7f35e60d4:45575 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40955:40955),(127.0.0.1/127.0.0.1:40747:40747)] 2024-11-19T04:55:44,095 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:44,220 DEBUG [08a7f35e60d4:42521 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T04:55:44,221 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=08a7f35e60d4,45575,1731992143611 2024-11-19T04:55:44,223 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 08a7f35e60d4,45575,1731992143611, state=OPENING 2024-11-19T04:55:44,224 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T04:55:44,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45575-0x1012e94b14c0001, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:55:44,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42521-0x1012e94b14c0000, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:55:44,226 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T04:55:44,226 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T04:55:44,226 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=08a7f35e60d4,45575,1731992143611}] 2024-11-19T04:55:44,227 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T04:55:44,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:44,380 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T04:55:44,382 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58497, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T04:55:44,385 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T04:55:44,386 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T04:55:44,387 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=08a7f35e60d4%2C45575%2C1731992143611.meta, suffix=.meta, logDir=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611, archiveDir=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/oldWALs, maxLogs=32 2024-11-19T04:55:44,388 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C45575%2C1731992143611.meta.1731992144388.meta 2024-11-19T04:55:44,393 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.meta.1731992144388.meta 2024-11-19T04:55:44,393 DEBUG 
[RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40747:40747),(127.0.0.1/127.0.0.1:40955:40955)] 2024-11-19T04:55:44,394 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T04:55:44,395 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T04:55:44,395 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T04:55:44,395 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-19T04:55:44,395 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T04:55:44,395 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:55:44,395 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T04:55:44,395 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T04:55:44,396 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T04:55:44,397 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T04:55:44,397 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:55:44,398 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:55:44,398 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T04:55:44,398 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T04:55:44,398 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:55:44,399 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:55:44,399 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T04:55:44,400 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T04:55:44,400 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:55:44,400 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:55:44,400 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T04:55:44,401 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T04:55:44,401 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:55:44,401 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:55:44,401 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T04:55:44,402 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/data/hbase/meta/1588230740 2024-11-19T04:55:44,403 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/data/hbase/meta/1588230740 2024-11-19T04:55:44,404 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T04:55:44,404 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T04:55:44,404 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
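The FlushLargeStoresPolicy line above reports that, with no hbase.hregion.percolumnfamilyflush.size.lower.bound set in the hbase:meta descriptor, the lower bound falls back to the region's memstore flush heap size divided by the number of column families (16.0 M here, consistent with the flushSizeLowerBound=16777216 logged shortly afterwards). A minimal sketch of that fallback arithmetic, with the flush size inferred from the logged result rather than read from the actual test configuration:

```java
// Sketch only: illustrates the fallback reported in the DEBUG line above.
// The 64 MB flush size is inferred (16.0 M x 4 families), not taken from the test's config.
public final class FlushLowerBoundSketch {
  public static void main(String[] args) {
    long memStoreFlushHeapSize = 64L * 1024 * 1024; // inferred, hypothetical
    int columnFamilies = 4;                         // hbase:meta families: info, ns, rep_barrier, table
    long lowerBound = memStoreFlushHeapSize / columnFamilies;
    System.out.println("per-family flush lower bound = " + lowerBound); // 16777216 bytes (16 MB)
  }
}
```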
2024-11-19T04:55:44,406 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T04:55:44,406 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=744280, jitterRate=-0.0536002516746521}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T04:55:44,406 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T04:55:44,407 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731992144395Writing region info on filesystem at 1731992144395Initializing all the Stores at 1731992144396 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992144396Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992144396Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992144396Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992144396Cleaning up temporary data from old regions at 1731992144404 (+8 ms)Running coprocessor post-open hooks at 1731992144406 (+2 ms)Region opened successfully at 1731992144407 (+1 ms) 2024-11-19T04:55:44,408 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731992144380 2024-11-19T04:55:44,411 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T04:55:44,411 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T04:55:44,412 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=08a7f35e60d4,45575,1731992143611 2024-11-19T04:55:44,413 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 08a7f35e60d4,45575,1731992143611, state=OPEN 2024-11-19T04:55:44,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45575-0x1012e94b14c0001, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T04:55:44,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42521-0x1012e94b14c0000, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T04:55:44,419 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=08a7f35e60d4,45575,1731992143611 2024-11-19T04:55:44,419 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T04:55:44,419 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T04:55:44,422 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T04:55:44,422 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=08a7f35e60d4,45575,1731992143611 in 193 msec 2024-11-19T04:55:44,425 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T04:55:44,425 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 605 msec 2024-11-19T04:55:44,426 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T04:55:44,426 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T04:55:44,427 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T04:55:44,427 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=08a7f35e60d4,45575,1731992143611, seqNum=-1] 2024-11-19T04:55:44,427 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T04:55:44,429 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58289, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T04:55:44,434 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 662 msec 2024-11-19T04:55:44,434 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731992144434, completionTime=-1 2024-11-19T04:55:44,434 INFO 
[master/08a7f35e60d4:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T04:55:44,434 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-19T04:55:44,436 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-19T04:55:44,436 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731992204436 2024-11-19T04:55:44,436 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731992264436 2024-11-19T04:55:44,436 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-19T04:55:44,436 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,42521,1731992143563-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T04:55:44,437 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,42521,1731992143563-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:55:44,437 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,42521,1731992143563-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:55:44,437 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-08a7f35e60d4:42521, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:55:44,437 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T04:55:44,437 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T04:55:44,438 DEBUG [master/08a7f35e60d4:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T04:55:44,440 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.790sec 2024-11-19T04:55:44,440 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T04:55:44,440 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T04:55:44,440 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T04:55:44,440 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
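Many of the INFO lines in this section are ChoreService reporting that a ScheduledChore was accepted with a given period. A minimal sketch of how a chore is typically defined and scheduled, assuming the public ChoreService/ScheduledChore classes; this is not the master's or region server's own code, and the chore name and period are made up:

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public final class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService choreService = new ChoreService("sketch"); // thread-name prefix
    ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 60_000) {
      @Override protected void chore() {
        // periodic work goes here; runs every 60 s until the stopper is stopped
      }
    };
    choreService.scheduleChore(chore); // ChoreService logs "... is enabled." when accepted
    Thread.sleep(1_000);
    choreService.shutdown();
  }
}
```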
2024-11-19T04:55:44,440 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T04:55:44,440 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,42521,1731992143563-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T04:55:44,441 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,42521,1731992143563-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T04:55:44,443 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T04:55:44,443 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T04:55:44,443 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,42521,1731992143563-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:55:44,533 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40375d59, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T04:55:44,533 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 08a7f35e60d4,42521,-1 for getting cluster id 2024-11-19T04:55:44,533 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T04:55:44,540 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'cd951867-a211-43d5-b66b-1dee431193e8' 2024-11-19T04:55:44,540 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T04:55:44,540 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "cd951867-a211-43d5-b66b-1dee431193e8" 2024-11-19T04:55:44,541 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29809f10, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T04:55:44,541 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [08a7f35e60d4,42521,-1] 2024-11-19T04:55:44,541 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T04:55:44,542 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:55:44,543 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60860, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T04:55:44,544 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6144f613, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T04:55:44,544 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T04:55:44,545 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=08a7f35e60d4,45575,1731992143611, seqNum=-1] 2024-11-19T04:55:44,546 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T04:55:44,547 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53528, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T04:55:44,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=08a7f35e60d4,42521,1731992143563 2024-11-19T04:55:44,549 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:55:44,552 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T04:55:44,552 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-19T04:55:44,552 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-19T04:55:44,552 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-19T04:55:44,553 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 08a7f35e60d4,42521,1731992143563 2024-11-19T04:55:44,553 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@68a66834 2024-11-19T04:55:44,553 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-19T04:55:44,555 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60872, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-19T04:55:44,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42521 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-19T04:55:44,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42521 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
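The two TableDescriptorChecker warnings above fire because the requested descriptor uses a tiny MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192). A hedged sketch of how a client could request a comparable table through the Admin API; the descriptor mirrors the create call logged below, while the connection setup is generic boilerplate rather than the test's own code:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public final class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
          .setMaxFileSize(786432L)       // small enough to trip the MAX_FILESIZE warning above
          .setMemStoreFlushSize(8192L)   // small enough to trip the MEMSTORE_FLUSHSIZE warning above
          .build();
      admin.createTable(td);             // surfaces as a CreateTableProcedure in the master log
    }
  }
}
```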
2024-11-19T04:55:44,556 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42521 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T04:55:44,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42521 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-19T04:55:44,558 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T04:55:44,558 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:55:44,558 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42521 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-19T04:55:44,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42521 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T04:55:44,560 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T04:55:44,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44257 is added to blk_1073741835_1011 (size=395) 2024-11-19T04:55:44,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741835_1011 (size=395) 2024-11-19T04:55:44,568 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 136e0d74fa1d678f94528dcfc60180c9, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731992144555.136e0d74fa1d678f94528dcfc60180c9.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808 2024-11-19T04:55:44,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741836_1012 (size=78) 2024-11-19T04:55:44,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44257 is added to blk_1073741836_1012 (size=78) 2024-11-19T04:55:44,575 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731992144555.136e0d74fa1d678f94528dcfc60180c9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:55:44,575 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 136e0d74fa1d678f94528dcfc60180c9, disabling compactions & flushes 2024-11-19T04:55:44,575 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731992144555.136e0d74fa1d678f94528dcfc60180c9. 2024-11-19T04:55:44,575 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731992144555.136e0d74fa1d678f94528dcfc60180c9. 2024-11-19T04:55:44,575 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731992144555.136e0d74fa1d678f94528dcfc60180c9. after waiting 0 ms 2024-11-19T04:55:44,575 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731992144555.136e0d74fa1d678f94528dcfc60180c9. 2024-11-19T04:55:44,575 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731992144555.136e0d74fa1d678f94528dcfc60180c9. 2024-11-19T04:55:44,575 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 136e0d74fa1d678f94528dcfc60180c9: Waiting for close lock at 1731992144575Disabling compacts and flushes for region at 1731992144575Disabling writes for close at 1731992144575Writing region close event to WAL at 1731992144575Closed at 1731992144575 2024-11-19T04:55:44,576 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T04:55:44,577 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731992144555.136e0d74fa1d678f94528dcfc60180c9.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731992144576"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731992144576"}]},"ts":"1731992144576"} 2024-11-19T04:55:44,579 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
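The MetaTableAccessor Put above writes the new region's info:regioninfo and info:state cells into hbase:meta. If one wanted to read that state back with the plain client API, a sketch might look like the following; the start-row prefix and the printed output are illustrative only:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class MetaStateSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
      // Meta rows for a table start with "<tableName>,"; read back the info:state cell
      // that the Put above wrote for the new region.
      Scan scan = new Scan()
          .withStartRow(Bytes.toBytes("TestLogRolling-testLogRollOnPipelineRestart,"))
          .setLimit(1)
          .addColumn(Bytes.toBytes("info"), Bytes.toBytes("state"));
      try (ResultScanner rs = meta.getScanner(scan)) {
        for (Result r : rs) {
          byte[] state = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("state"));
          System.out.println(Bytes.toString(r.getRow()) + " -> " + Bytes.toString(state));
        }
      }
    }
  }
}
```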
2024-11-19T04:55:44,580 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T04:55:44,581 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731992144581"}]},"ts":"1731992144581"} 2024-11-19T04:55:44,583 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-19T04:55:44,583 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=136e0d74fa1d678f94528dcfc60180c9, ASSIGN}] 2024-11-19T04:55:44,584 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=136e0d74fa1d678f94528dcfc60180c9, ASSIGN 2024-11-19T04:55:44,585 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=136e0d74fa1d678f94528dcfc60180c9, ASSIGN; state=OFFLINE, location=08a7f35e60d4,45575,1731992143611; forceNewPlan=false, retain=false 2024-11-19T04:55:44,736 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=136e0d74fa1d678f94528dcfc60180c9, regionState=OPENING, regionLocation=08a7f35e60d4,45575,1731992143611 2024-11-19T04:55:44,739 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=136e0d74fa1d678f94528dcfc60180c9, ASSIGN because future has completed 2024-11-19T04:55:44,740 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 136e0d74fa1d678f94528dcfc60180c9, server=08a7f35e60d4,45575,1731992143611}] 2024-11-19T04:55:44,896 INFO [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731992144555.136e0d74fa1d678f94528dcfc60180c9. 
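The recurring Close-WAL-Writer-0 WARNs in this section come from RecoverLeaseFSUtils invoking isFileClosed reflectively against a FileSystem whose underlying DFSClient is already closed, so every attempt surfaces as an InvocationTargetException caused by "Filesystem closed" and the retry loop keeps logging it. A minimal sketch of that kind of reflective probe, assuming DistributedFileSystem#isFileClosed(Path) is present; this is not the HBase implementation itself:

```java
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class IsFileClosedProbe {
  // Look up isFileClosed via reflection so the caller compiles against plain FileSystem,
  // which is what the RecoverLeaseFSUtils frames in the traces above suggest.
  public static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (ReflectiveOperationException e) {
      // An InvocationTargetException caused by IOException("Filesystem closed") lands here
      // when the client behind the FileSystem has been shut down; a caller would log it
      // and keep retrying, as the WARNs above show.
      return false;
    }
  }
}
```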
2024-11-19T04:55:44,897 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 136e0d74fa1d678f94528dcfc60180c9, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731992144555.136e0d74fa1d678f94528dcfc60180c9.', STARTKEY => '', ENDKEY => ''} 2024-11-19T04:55:44,897 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 136e0d74fa1d678f94528dcfc60180c9 2024-11-19T04:55:44,897 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731992144555.136e0d74fa1d678f94528dcfc60180c9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:55:44,897 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 136e0d74fa1d678f94528dcfc60180c9 2024-11-19T04:55:44,897 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 136e0d74fa1d678f94528dcfc60180c9 2024-11-19T04:55:44,898 INFO [StoreOpener-136e0d74fa1d678f94528dcfc60180c9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 136e0d74fa1d678f94528dcfc60180c9 2024-11-19T04:55:44,900 INFO [StoreOpener-136e0d74fa1d678f94528dcfc60180c9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 136e0d74fa1d678f94528dcfc60180c9 columnFamilyName info 2024-11-19T04:55:44,900 DEBUG [StoreOpener-136e0d74fa1d678f94528dcfc60180c9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:55:44,900 INFO [StoreOpener-136e0d74fa1d678f94528dcfc60180c9-1 {}] regionserver.HStore(327): Store=136e0d74fa1d678f94528dcfc60180c9/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T04:55:44,900 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 136e0d74fa1d678f94528dcfc60180c9 2024-11-19T04:55:44,901 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/data/default/TestLogRolling-testLogRollOnPipelineRestart/136e0d74fa1d678f94528dcfc60180c9 2024-11-19T04:55:44,902 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/data/default/TestLogRolling-testLogRollOnPipelineRestart/136e0d74fa1d678f94528dcfc60180c9 2024-11-19T04:55:44,902 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 136e0d74fa1d678f94528dcfc60180c9 2024-11-19T04:55:44,902 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 136e0d74fa1d678f94528dcfc60180c9 2024-11-19T04:55:44,904 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 136e0d74fa1d678f94528dcfc60180c9 2024-11-19T04:55:44,906 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/data/default/TestLogRolling-testLogRollOnPipelineRestart/136e0d74fa1d678f94528dcfc60180c9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T04:55:44,907 INFO [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 136e0d74fa1d678f94528dcfc60180c9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=775082, jitterRate=-0.014432832598686218}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T04:55:44,907 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 136e0d74fa1d678f94528dcfc60180c9 2024-11-19T04:55:44,908 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 136e0d74fa1d678f94528dcfc60180c9: Running coprocessor pre-open hook at 1731992144897Writing region info on filesystem at 1731992144897Initializing all the Stores at 1731992144898 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992144898Cleaning up temporary data from old regions at 1731992144902 (+4 ms)Running coprocessor post-open hooks at 1731992144907 (+5 ms)Region opened successfully at 1731992144908 (+1 ms) 2024-11-19T04:55:44,909 INFO [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731992144555.136e0d74fa1d678f94528dcfc60180c9., pid=6, masterSystemTime=1731992144892 2024-11-19T04:55:44,911 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1731992144555.136e0d74fa1d678f94528dcfc60180c9. 2024-11-19T04:55:44,911 INFO [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731992144555.136e0d74fa1d678f94528dcfc60180c9. 2024-11-19T04:55:44,912 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=136e0d74fa1d678f94528dcfc60180c9, regionState=OPEN, openSeqNum=2, regionLocation=08a7f35e60d4,45575,1731992143611 2024-11-19T04:55:44,915 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 136e0d74fa1d678f94528dcfc60180c9, server=08a7f35e60d4,45575,1731992143611 because future has completed 2024-11-19T04:55:44,919 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-19T04:55:44,919 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 136e0d74fa1d678f94528dcfc60180c9, server=08a7f35e60d4,45575,1731992143611 in 177 msec 2024-11-19T04:55:44,922 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-19T04:55:44,922 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=136e0d74fa1d678f94528dcfc60180c9, ASSIGN in 336 msec 2024-11-19T04:55:44,923 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T04:55:44,923 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731992144923"}]},"ts":"1731992144923"} 2024-11-19T04:55:44,925 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-19T04:55:44,926 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T04:55:44,928 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 370 msec 2024-11-19T04:55:45,096 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:45,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:46,097 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:46,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:47,097 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:47,146 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T04:55:47,146 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-19T04:55:47,147 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-19T04:55:47,147 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-19T04:55:47,148 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T04:55:47,148 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-19T04:55:47,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:48,098 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:48,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:49,098 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:49,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:49,897 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T04:55:49,913 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:55:49,914 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:55:49,914 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:55:49,914 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:55:49,914 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:55:49,915 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:55:49,918 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:55:49,920 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:55:49,925 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-19T04:55:49,925 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-19T04:55:50,099 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:50,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:51,100 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:51,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:52,100 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:52,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:53,101 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:53,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:54,102 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:54,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:54,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42521 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T04:55:54,629 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-19T04:55:54,629 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-19T04:55:54,633 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-19T04:55:54,633 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731992144555.136e0d74fa1d678f94528dcfc60180c9. 2024-11-19T04:55:54,637 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731992144555.136e0d74fa1d678f94528dcfc60180c9., hostname=08a7f35e60d4,45575,1731992143611, seqNum=2] 2024-11-19T04:55:55,102 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:55,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:56,103 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:56,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:56,640 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992144002 2024-11-19T04:55:56,641 WARN [ResponseProcessor for block BP-1239689047-172.17.0.2-1731992142713:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1239689047-172.17.0.2-1731992142713:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:56,641 WARN [ResponseProcessor for block BP-1239689047-172.17.0.2-1731992142713:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1239689047-172.17.0.2-1731992142713:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1239689047-172.17.0.2-1731992142713:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:44257,DS-94073cf7-b1ad-41b2-b652-74411d71ed62,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:56,641 WARN [ResponseProcessor for block BP-1239689047-172.17.0.2-1731992142713:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1239689047-172.17.0.2-1731992142713:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1239689047-172.17.0.2-1731992142713:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:44257,DS-94073cf7-b1ad-41b2-b652-74411d71ed62,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:56,642 WARN [DataStreamer for file /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992144002 block BP-1239689047-172.17.0.2-1731992142713:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1239689047-172.17.0.2-1731992142713:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44257,DS-94073cf7-b1ad-41b2-b652-74411d71ed62,DISK], DatanodeInfoWithStorage[127.0.0.1:43811,DS-5f22df2c-221c-448e-9d00-366dafd2a73a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44257,DS-94073cf7-b1ad-41b2-b652-74411d71ed62,DISK]) is bad. 
2024-11-19T04:55:56,642 WARN [DataStreamer for file /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.meta.1731992144388.meta block BP-1239689047-172.17.0.2-1731992142713:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1239689047-172.17.0.2-1731992142713:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43811,DS-5f22df2c-221c-448e-9d00-366dafd2a73a,DISK], DatanodeInfoWithStorage[127.0.0.1:44257,DS-94073cf7-b1ad-41b2-b652-74411d71ed62,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44257,DS-94073cf7-b1ad-41b2-b652-74411d71ed62,DISK]) is bad. 2024-11-19T04:55:56,642 WARN [DataStreamer for file /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/WALs/08a7f35e60d4,42521,1731992143563/08a7f35e60d4%2C42521%2C1731992143563.1731992143702 block BP-1239689047-172.17.0.2-1731992142713:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1239689047-172.17.0.2-1731992142713:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43811,DS-5f22df2c-221c-448e-9d00-366dafd2a73a,DISK], DatanodeInfoWithStorage[127.0.0.1:44257,DS-94073cf7-b1ad-41b2-b652-74411d71ed62,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44257,DS-94073cf7-b1ad-41b2-b652-74411d71ed62,DISK]) is bad. 2024-11-19T04:55:56,642 WARN [PacketResponder: BP-1239689047-172.17.0.2-1731992142713:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:44257] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T04:55:56,642 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-418324952_22 at /127.0.0.1:57082 [Receiving block BP-1239689047-172.17.0.2-1731992142713:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:43811:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57082 dst: /127.0.0.1:43811 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:56,642 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2129020850_22 at /127.0.0.1:57114 [Receiving block BP-1239689047-172.17.0.2-1731992142713:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:43811:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57114 dst: /127.0.0.1:43811 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:56,642 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2129020850_22 at /127.0.0.1:57104 [Receiving block BP-1239689047-172.17.0.2-1731992142713:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43811:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57104 dst: /127.0.0.1:43811 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:56,642 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2129020850_22 at /127.0.0.1:36418 [Receiving block BP-1239689047-172.17.0.2-1731992142713:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44257:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36418 dst: /127.0.0.1:44257 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:56,643 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2129020850_22 at /127.0.0.1:36426 [Receiving block BP-1239689047-172.17.0.2-1731992142713:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44257:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36426 dst: /127.0.0.1:44257 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:56,643 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-418324952_22 at /127.0.0.1:36384 [Receiving block BP-1239689047-172.17.0.2-1731992142713:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44257:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36384 dst: /127.0.0.1:44257 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T04:55:56,644 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@140caf6f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:55:56,644 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1da660ce{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T04:55:56,644 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T04:55:56,645 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3fd7563{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T04:55:56,645 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11255fea{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/hadoop.log.dir/,STOPPED} 2024-11-19T04:55:56,646 WARN [BP-1239689047-172.17.0.2-1731992142713 heartbeating to localhost/127.0.0.1:42601 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T04:55:56,647 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-19T04:55:56,647 WARN [BP-1239689047-172.17.0.2-1731992142713 heartbeating to localhost/127.0.0.1:42601 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1239689047-172.17.0.2-1731992142713 (Datanode Uuid 498e3122-0852-427c-9e8a-ef967a03db89) service to localhost/127.0.0.1:42601 2024-11-19T04:55:56,647 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T04:55:56,647 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/cluster_14c23cca-1b11-3138-a135-f4ba00ba23c7/data/data3/current/BP-1239689047-172.17.0.2-1731992142713 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:55:56,647 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/cluster_14c23cca-1b11-3138-a135-f4ba00ba23c7/data/data4/current/BP-1239689047-172.17.0.2-1731992142713 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:55:56,648 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T04:55:56,656 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:55:56,659 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T04:55:56,660 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T04:55:56,660 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T04:55:56,660 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T04:55:56,661 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5960bf29{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/hadoop.log.dir/,AVAILABLE} 2024-11-19T04:55:56,661 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c07ac8c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T04:55:56,775 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6a4031cc{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/java.io.tmpdir/jetty-localhost-45093-hadoop-hdfs-3_4_1-tests_jar-_-any-4222862211383413072/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:55:56,775 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@126bd190{HTTP/1.1, (http/1.1)}{localhost:45093} 2024-11-19T04:55:56,776 INFO [Time-limited test {}] server.Server(415): Started @164820ms 2024-11-19T04:55:56,777 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T04:55:56,794 WARN [ResponseProcessor for block BP-1239689047-172.17.0.2-1731992142713:blk_1073741833_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1239689047-172.17.0.2-1731992142713:blk_1073741833_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:56,794 WARN [ResponseProcessor for block BP-1239689047-172.17.0.2-1731992142713:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1239689047-172.17.0.2-1731992142713:blk_1073741834_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:56,794 WARN [ResponseProcessor for block BP-1239689047-172.17.0.2-1731992142713:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1239689047-172.17.0.2-1731992142713:blk_1073741830_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:56,795 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2129020850_22 at /127.0.0.1:60480 [Receiving block BP-1239689047-172.17.0.2-1731992142713:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43811:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60480 dst: /127.0.0.1:43811 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T04:55:56,795 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-418324952_22 at /127.0.0.1:60464 [Receiving block BP-1239689047-172.17.0.2-1731992142713:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:43811:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60464 dst: /127.0.0.1:43811 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:56,796 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2129020850_22 at /127.0.0.1:60486 [Receiving block BP-1239689047-172.17.0.2-1731992142713:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:43811:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60486 dst: /127.0.0.1:43811 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:55:56,801 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6f0827f7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:55:56,801 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2744dc92{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T04:55:56,801 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T04:55:56,802 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@165d0fad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T04:55:56,802 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25f63c50{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/hadoop.log.dir/,STOPPED} 2024-11-19T04:55:56,803 WARN [BP-1239689047-172.17.0.2-1731992142713 heartbeating to localhost/127.0.0.1:42601 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T04:55:56,803 WARN [BP-1239689047-172.17.0.2-1731992142713 heartbeating to localhost/127.0.0.1:42601 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1239689047-172.17.0.2-1731992142713 (Datanode Uuid 2717070f-ca22-4f46-80fb-eee7927e4ef3) service to localhost/127.0.0.1:42601 2024-11-19T04:55:56,803 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T04:55:56,803 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T04:55:56,804 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/cluster_14c23cca-1b11-3138-a135-f4ba00ba23c7/data/data1/current/BP-1239689047-172.17.0.2-1731992142713 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:55:56,804 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/cluster_14c23cca-1b11-3138-a135-f4ba00ba23c7/data/data2/current/BP-1239689047-172.17.0.2-1731992142713 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:55:56,804 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T04:55:56,816 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:55:56,819 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T04:55:56,820 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T04:55:56,820 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T04:55:56,820 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T04:55:56,821 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@176f5faa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/hadoop.log.dir/,AVAILABLE} 2024-11-19T04:55:56,821 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3237c5f1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T04:55:56,868 WARN [Thread-1329 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T04:55:56,871 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x96a671df0e17041b with lease ID 0xe05a4096d0ba6cf2: from storage DS-94073cf7-b1ad-41b2-b652-74411d71ed62 node DatanodeRegistration(127.0.0.1:36987, datanodeUuid=498e3122-0852-427c-9e8a-ef967a03db89, infoPort=38595, infoSecurePort=0, ipcPort=33275, storageInfo=lv=-57;cid=testClusterID;nsid=1235982065;c=1731992142713), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:55:56,871 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x96a671df0e17041b with lease ID 0xe05a4096d0ba6cf2: from storage DS-b9217c57-a20b-456b-ac08-50c1f9c49230 node DatanodeRegistration(127.0.0.1:36987, datanodeUuid=498e3122-0852-427c-9e8a-ef967a03db89, infoPort=38595, infoSecurePort=0, ipcPort=33275, storageInfo=lv=-57;cid=testClusterID;nsid=1235982065;c=1731992142713), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:55:56,936 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@358d2587{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/java.io.tmpdir/jetty-localhost-40701-hadoop-hdfs-3_4_1-tests_jar-_-any-12390607276110256727/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:55:56,936 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2e49578b{HTTP/1.1, (http/1.1)}{localhost:40701} 2024-11-19T04:55:56,937 INFO [Time-limited test {}] server.Server(415): Started @164981ms 2024-11-19T04:55:56,938 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T04:55:57,031 WARN [Thread-1360 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T04:55:57,034 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x24d359b8694f2e54 with lease ID 0xe05a4096d0ba6cf3: from storage DS-5f22df2c-221c-448e-9d00-366dafd2a73a node DatanodeRegistration(127.0.0.1:35391, datanodeUuid=2717070f-ca22-4f46-80fb-eee7927e4ef3, infoPort=36047, infoSecurePort=0, ipcPort=40953, storageInfo=lv=-57;cid=testClusterID;nsid=1235982065;c=1731992142713), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:55:57,034 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x24d359b8694f2e54 with lease ID 0xe05a4096d0ba6cf3: from storage DS-28d48564-144d-4db4-9424-0117ed7ff28f node DatanodeRegistration(127.0.0.1:35391, datanodeUuid=2717070f-ca22-4f46-80fb-eee7927e4ef3, infoPort=36047, infoSecurePort=0, ipcPort=40953, storageInfo=lv=-57;cid=testClusterID;nsid=1235982065;c=1731992142713), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:55:57,103 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:55:57,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:57,957 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-19T04:55:57,959 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-19T04:55:57,961 ERROR [FSHLog-0-hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808-prefix:08a7f35e60d4,45575,1731992143611 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43811,DS-5f22df2c-221c-448e-9d00-366dafd2a73a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T04:55:57,961 WARN [FSHLog-0-hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808-prefix:08a7f35e60d4,45575,1731992143611 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43811,DS-5f22df2c-221c-448e-9d00-366dafd2a73a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:57,961 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 08a7f35e60d4%2C45575%2C1731992143611:(num 1731992144002) roll requested 2024-11-19T04:55:57,961 INFO [regionserver/08a7f35e60d4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C45575%2C1731992143611.1731992157961 2024-11-19T04:55:57,967 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992144002 newFile=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992157961 2024-11-19T04:55:57,967 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:57,967 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:57,967 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:57,967 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:57,967 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:55:57,968 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992144002 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992157961 2024-11-19T04:55:57,968 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43811,DS-5f22df2c-221c-448e-9d00-366dafd2a73a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T04:55:57,968 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43811,DS-5f22df2c-221c-448e-9d00-366dafd2a73a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:55:57,968 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992144002 2024-11-19T04:55:57,969 WARN [IPC Server handler 2 on default port 42601 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992144002 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1015 2024-11-19T04:55:57,969 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36047:36047),(127.0.0.1/127.0.0.1:38595:38595)] 2024-11-19T04:55:57,969 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992144002 is not closed yet, will try archiving it next time 2024-11-19T04:55:57,969 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992144002 after 1ms 2024-11-19T04:55:58,104 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:58,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:55:59,105 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:59,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:55:59,972 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-19T04:56:00,105 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:56:00,327 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:01,106 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:01,327 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:56:01,870 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1015: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-19T04:56:01,970 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992144002 after 4002ms 2024-11-19T04:56:01,975 WARN [ResponseProcessor for block BP-1239689047-172.17.0.2-1731992142713:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1239689047-172.17.0.2-1731992142713:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:56:01,975 WARN [DataStreamer for file /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992157961 block BP-1239689047-172.17.0.2-1731992142713:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1239689047-172.17.0.2-1731992142713:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35391,DS-5f22df2c-221c-448e-9d00-366dafd2a73a,DISK], DatanodeInfoWithStorage[127.0.0.1:36987,DS-94073cf7-b1ad-41b2-b652-74411d71ed62,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35391,DS-5f22df2c-221c-448e-9d00-366dafd2a73a,DISK]) is bad. 
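The repeated Close-WAL-Writer-0 warnings above come from RecoverLeaseFSUtils polling an old WAL file after a roll: it asks the NameNode to recover the lease and then checks isFileClosed (reflectively, per the stack trace) until HDFS reports the file closed. Every attempt here fails with "Filesystem closed" because the DFSClient behind the cached FileSystem instance has already been shut down, so DFSClient.checkOpen rejects the call before it ever reaches the NameNode. For orientation only, a minimal sketch of that recover-then-poll pattern against a live client is shown below; the path, timeout, and sleep interval are placeholders, and this is not the HBase utility itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  // Ask HDFS to recover the lease on a WAL file, then poll isFileClosed()
  // until the NameNode reports it closed -- roughly the pattern that
  // RecoverLeaseFSUtils follows (it invokes isFileClosed via reflection).
  static boolean recoverAndWait(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    boolean recovered = dfs.recoverLease(wal); // true means the lease is already free
    while (!recovered && System.currentTimeMillis() < deadline) {
      if (dfs.isFileClosed(wal)) {             // the call that fails above once the client is closed
        return true;
      }
      Thread.sleep(1000);                      // placeholder backoff
      recovered = dfs.recoverLease(wal);
    }
    return recovered;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path wal = new Path(args[0]);              // placeholder: one of the WAL paths under .../WALs/...
    // newInstance bypasses the shared FileSystem cache, so a close() elsewhere cannot invalidate this client.
    try (FileSystem fs = FileSystem.newInstance(wal.toUri(), conf)) {
      System.out.println("closed=" + recoverAndWait((DistributedFileSystem) fs, wal, 60_000));
    }
  }
}

Using FileSystem.newInstance rather than the shared FileSystem.get cache, as in the sketch, is one way to keep an unrelated close() elsewhere in the JVM from producing the "Filesystem closed" failures seen in the retries above.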
2024-11-19T04:56:01,975 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2129020850_22 at /127.0.0.1:35232 [Receiving block BP-1239689047-172.17.0.2-1731992142713:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:35391:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35232 dst: /127.0.0.1:35391 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:56:01,976 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2129020850_22 at /127.0.0.1:41128 [Receiving block BP-1239689047-172.17.0.2-1731992142713:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:36987:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41128 dst: /127.0.0.1:36987 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:56:01,977 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@358d2587{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:56:01,977 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2e49578b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T04:56:01,977 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T04:56:01,977 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3237c5f1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T04:56:01,977 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@176f5faa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/hadoop.log.dir/,STOPPED} 2024-11-19T04:56:01,978 WARN [BP-1239689047-172.17.0.2-1731992142713 heartbeating to localhost/127.0.0.1:42601 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T04:56:01,978 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T04:56:01,978 WARN [BP-1239689047-172.17.0.2-1731992142713 heartbeating to localhost/127.0.0.1:42601 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1239689047-172.17.0.2-1731992142713 (Datanode Uuid 2717070f-ca22-4f46-80fb-eee7927e4ef3) service to localhost/127.0.0.1:42601 2024-11-19T04:56:01,978 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T04:56:01,979 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/cluster_14c23cca-1b11-3138-a135-f4ba00ba23c7/data/data1/current/BP-1239689047-172.17.0.2-1731992142713 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:56:01,979 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/cluster_14c23cca-1b11-3138-a135-f4ba00ba23c7/data/data2/current/BP-1239689047-172.17.0.2-1731992142713 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:56:01,979 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T04:56:01,988 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:56:01,991 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T04:56:01,993 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T04:56:01,993 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T04:56:01,993 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T04:56:01,993 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@718ea2f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/hadoop.log.dir/,AVAILABLE} 2024-11-19T04:56:01,994 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@167fd01b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T04:56:02,106 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:02,107 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2ca8564b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/java.io.tmpdir/jetty-localhost-38827-hadoop-hdfs-3_4_1-tests_jar-_-any-1397849768496781094/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:56:02,107 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@fa662a{HTTP/1.1, (http/1.1)}{localhost:38827} 2024-11-19T04:56:02,107 INFO [Time-limited test {}] server.Server(415): Started @170152ms 2024-11-19T04:56:02,109 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T04:56:02,130 WARN [ResponseProcessor for block BP-1239689047-172.17.0.2-1731992142713:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1239689047-172.17.0.2-1731992142713:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T04:56:02,130 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2129020850_22 at /127.0.0.1:35842 [Receiving block BP-1239689047-172.17.0.2-1731992142713:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:36987:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35842 dst: /127.0.0.1:36987 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T04:56:02,134 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6a4031cc{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:56:02,134 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@126bd190{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T04:56:02,134 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T04:56:02,134 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c07ac8c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T04:56:02,134 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5960bf29{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/hadoop.log.dir/,STOPPED} 2024-11-19T04:56:02,136 WARN [BP-1239689047-172.17.0.2-1731992142713 heartbeating to localhost/127.0.0.1:42601 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T04:56:02,136 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-19T04:56:02,136 WARN [BP-1239689047-172.17.0.2-1731992142713 heartbeating to localhost/127.0.0.1:42601 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1239689047-172.17.0.2-1731992142713 (Datanode Uuid 498e3122-0852-427c-9e8a-ef967a03db89) service to localhost/127.0.0.1:42601 2024-11-19T04:56:02,136 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T04:56:02,136 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/cluster_14c23cca-1b11-3138-a135-f4ba00ba23c7/data/data3/current/BP-1239689047-172.17.0.2-1731992142713 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:56:02,137 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/cluster_14c23cca-1b11-3138-a135-f4ba00ba23c7/data/data4/current/BP-1239689047-172.17.0.2-1731992142713 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:56:02,137 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T04:56:02,150 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:56:02,155 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T04:56:02,156 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T04:56:02,156 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T04:56:02,156 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T04:56:02,157 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@760c54cf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/hadoop.log.dir/,AVAILABLE} 2024-11-19T04:56:02,157 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2026736d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T04:56:02,221 WARN [Thread-1403 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T04:56:02,257 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbfb304bfb22fd0f1 with lease ID 0xe05a4096d0ba6cf4: from storage DS-5f22df2c-221c-448e-9d00-366dafd2a73a node DatanodeRegistration(127.0.0.1:43143, datanodeUuid=2717070f-ca22-4f46-80fb-eee7927e4ef3, infoPort=32961, infoSecurePort=0, ipcPort=41909, storageInfo=lv=-57;cid=testClusterID;nsid=1235982065;c=1731992142713), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:56:02,257 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbfb304bfb22fd0f1 with lease ID 0xe05a4096d0ba6cf4: from storage DS-28d48564-144d-4db4-9424-0117ed7ff28f node DatanodeRegistration(127.0.0.1:43143, datanodeUuid=2717070f-ca22-4f46-80fb-eee7927e4ef3, infoPort=32961, infoSecurePort=0, ipcPort=41909, storageInfo=lv=-57;cid=testClusterID;nsid=1235982065;c=1731992142713), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:56:02,315 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@528eeea6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/java.io.tmpdir/jetty-localhost-40013-hadoop-hdfs-3_4_1-tests_jar-_-any-12090184160573872924/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:56:02,316 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@20466b6{HTTP/1.1, (http/1.1)}{localhost:40013} 2024-11-19T04:56:02,316 INFO [Time-limited test {}] server.Server(415): Started @170361ms 2024-11-19T04:56:02,318 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-19T04:56:02,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:02,424 WARN [Thread-1434 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T04:56:02,426 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2d6d84473f6d8ec6 with lease ID 0xe05a4096d0ba6cf5: from storage DS-94073cf7-b1ad-41b2-b652-74411d71ed62 node DatanodeRegistration(127.0.0.1:40403, datanodeUuid=498e3122-0852-427c-9e8a-ef967a03db89, infoPort=45245, infoSecurePort=0, ipcPort=36527, storageInfo=lv=-57;cid=testClusterID;nsid=1235982065;c=1731992142713), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:56:02,427 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2d6d84473f6d8ec6 with lease ID 0xe05a4096d0ba6cf5: from storage DS-b9217c57-a20b-456b-ac08-50c1f9c49230 node DatanodeRegistration(127.0.0.1:40403, datanodeUuid=498e3122-0852-427c-9e8a-ef967a03db89, infoPort=45245, infoSecurePort=0, ipcPort=36527, storageInfo=lv=-57;cid=testClusterID;nsid=1235982065;c=1731992142713), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:56:03,107 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:56:03,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:03,340 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-19T04:56:03,342 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-19T04:56:03,344 ERROR [FSHLog-0-hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808-prefix:08a7f35e60d4,45575,1731992143611 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36987,DS-94073cf7-b1ad-41b2-b652-74411d71ed62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T04:56:03,344 WARN [FSHLog-0-hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808-prefix:08a7f35e60d4,45575,1731992143611 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36987,DS-94073cf7-b1ad-41b2-b652-74411d71ed62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:56:03,344 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 08a7f35e60d4%2C45575%2C1731992143611:(num 1731992157961) roll requested 2024-11-19T04:56:03,344 INFO [regionserver/08a7f35e60d4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C45575%2C1731992143611.1731992163344 2024-11-19T04:56:03,350 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992157961 newFile=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992163344 2024-11-19T04:56:03,350 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:03,350 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:03,350 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:03,350 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:03,350 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:03,350 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992157961 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992163344 2024-11-19T04:56:03,351 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36987,DS-94073cf7-b1ad-41b2-b652-74411d71ed62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
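At 04:56:03 the append fails with "All datanodes ... are bad" and the logRoller thread immediately rolls to a new writer on a fresh pipeline, leaving the old file for lease recovery and later archiving. The same roll can also be requested explicitly through the client Admin API; a minimal sketch follows, assuming a reachable cluster, with the server name copied from the WAL directory name in this log purely as an illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RollWalSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative "host,port,startcode" name, in the same form as the WAL directory above.
    ServerName rs = ServerName.valueOf("08a7f35e60d4,45575,1731992143611");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the region server to close its current WAL writer and open a new one,
      // the operation the logRoller thread performs in the log above.
      admin.rollWALWriter(rs);
    }
  }
}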
2024-11-19T04:56:03,351 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36987,DS-94073cf7-b1ad-41b2-b652-74411d71ed62,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:56:03,351 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992157961 2024-11-19T04:56:03,351 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45245:45245),(127.0.0.1/127.0.0.1:32961:32961)] 2024-11-19T04:56:03,351 WARN [IPC Server handler 1 on default port 42601 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992157961 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-19T04:56:03,351 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992157961 is not closed yet, will try archiving it next time 2024-11-19T04:56:03,351 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992157961 after 0ms 2024-11-19T04:56:04,108 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:04,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:56:05,109 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:05,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:05,353 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C45575%2C1731992143611.1731992165353 2024-11-19T04:56:05,359 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992163344 newFile=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992165353 2024-11-19T04:56:05,359 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:05,360 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:05,360 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:05,360 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:05,360 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:05,360 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992163344 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992165353 2024-11-19T04:56:05,364 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32961:32961),(127.0.0.1/127.0.0.1:45245:45245)] 2024-11-19T04:56:05,364 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992157961 is not closed yet, will try archiving it next time 2024-11-19T04:56:05,364 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992163344 is not closed yet, will try archiving it next time 2024-11-19T04:56:05,365 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for 
hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992144002 2024-11-19T04:56:05,365 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992144002 2024-11-19T04:56:05,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43143 is added to blk_1073741838_1019 (size=1264) 2024-11-19T04:56:05,366 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992144002 after 1ms 2024-11-19T04:56:05,366 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992144002 2024-11-19T04:56:05,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40403 is added to blk_1073741838_1019 (size=1264) 2024-11-19T04:56:05,367 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992157961 is not closed yet, will try archiving it next time 2024-11-19T04:56:05,380 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731992144908/Put/vlen=218/seqid=0] 2024-11-19T04:56:05,381 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731992154638/Put/vlen=1045/seqid=0] 2024-11-19T04:56:05,381 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992144002 2024-11-19T04:56:05,381 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992157961 2024-11-19T04:56:05,381 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992157961 2024-11-19T04:56:05,381 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992157961 after 0ms 2024-11-19T04:56:05,381 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992157961 2024-11-19T04:56:05,386 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731992157960/Put/vlen=1045/seqid=0] 2024-11-19T04:56:05,386 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731992159973/Put/vlen=1045/seqid=0] 
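The lease-recovery sequence above ("Recover lease on dfs file ...", "Recovered lease, attempt=0 on file=... after 1ms", then the reader replaying the old WAL entries) comes down to asking the NameNode to recover the lease on the abandoned WAL and polling until the file is reported closed. The sketch below is a minimal, assumed illustration of that loop in plain Java, not the actual RecoverLeaseFSUtils code: the class name, timeout, and one-second pause are invented for the example, and it calls DistributedFileSystem.isFileClosed directly, whereas the utility in the traces above reaches it via reflection (which is why its failures surface as InvocationTargetException wrapping the real cause).

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Minimal sketch of a WAL lease-recovery loop; names and timings are assumptions.
public final class LeaseRecoverySketch {

  /** Returns true once the NameNode reports the file closed, false on timeout. */
  public static boolean recoverLease(FileSystem fs, Path walFile, long timeoutMs)
      throws IOException, InterruptedException {
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // only HDFS files carry a lease to recover
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    long deadline = System.currentTimeMillis() + timeoutMs;
    for (int attempt = 0; System.currentTimeMillis() < deadline; attempt++) {
      // recoverLease() returns true when the file is closed and its lease released;
      // a false return means recovery was started and may finish later.
      if (dfs.recoverLease(walFile)) {
        System.out.println("Recovered lease, attempt=" + attempt + " on file=" + walFile);
        return true;
      }
      // A recovery kicked off by an earlier attempt can complete in the background;
      // isFileClosed() is the probe that appears in the stack traces above.
      if (dfs.isFileClosed(walFile)) {
        return true;
      }
      Thread.sleep(1000L); // assumed pause between attempts
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    Path wal = new Path(args[0]); // an hdfs:// WAL path such as the ones logged above
    FileSystem fs = wal.getFileSystem(new Configuration());
    System.out.println("closed=" + recoverLease(fs, wal, 60_000L));
  }
}

In the log, the same loop shows up both succeeding quickly ("after 0ms"/"after 1ms") and needing a second attempt roughly four seconds later once block recovery has finished.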
2024-11-19T04:56:05,386 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992157961 2024-11-19T04:56:05,386 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992163344 2024-11-19T04:56:05,386 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992163344 2024-11-19T04:56:05,386 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992163344 after 0ms 2024-11-19T04:56:05,386 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992163344 2024-11-19T04:56:05,390 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731992163343/Put/vlen=1045/seqid=0] 2024-11-19T04:56:05,391 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992165353 2024-11-19T04:56:05,391 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992165353 2024-11-19T04:56:05,391 WARN [IPC Server handler 3 on default port 42601 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992165353 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-19T04:56:05,391 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992165353 after 0ms 2024-11-19T04:56:06,109 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:06,225 WARN [ResponseProcessor for block BP-1239689047-172.17.0.2-1731992142713:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1239689047-172.17.0.2-1731992142713:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:56:06,225 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-418324952_22 at /127.0.0.1:36598 [Receiving block BP-1239689047-172.17.0.2-1731992142713:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:43143:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36598 dst: /127.0.0.1:43143 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:43143 remote=/127.0.0.1:36598]. Total timeout mills is 60000, 59133 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:56:06,226 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-418324952_22 at /127.0.0.1:56608 [Receiving block BP-1239689047-172.17.0.2-1731992142713:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:40403:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56608 dst: /127.0.0.1:40403 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
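The "Caused by: java.io.IOException: Filesystem closed" lines that recur throughout this section (the Close-WAL-Writer-0 retries against the hdfs://localhost:41423 paths) are raised by DFSClient.checkOpen as soon as any HDFS call is made through a client that has already been shut down, so every retry fails the same way until the executor stops. The fragment below is a small, assumed stand-alone demonstration of that failure mode, not code from this test run; the class name and the command-line path are invented for the example, and it expects an hdfs:// URI.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Assumed demo: any DFS call issued after close() fails with "Filesystem closed".
public final class FilesystemClosedDemo {
  public static void main(String[] args) throws Exception {
    Path p = new Path(args[0]);                  // expects an hdfs:// path
    FileSystem fs = p.getFileSystem(new Configuration());
    fs.close();                                  // shuts down the underlying DFSClient
    fs.exists(p);                                // throws java.io.IOException: Filesystem closed
  }
}
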
2024-11-19T04:56:06,226 WARN [DataStreamer for file /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992165353 block BP-1239689047-172.17.0.2-1731992142713:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1239689047-172.17.0.2-1731992142713:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43143,DS-5f22df2c-221c-448e-9d00-366dafd2a73a,DISK], DatanodeInfoWithStorage[127.0.0.1:40403,DS-94073cf7-b1ad-41b2-b652-74411d71ed62,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43143,DS-5f22df2c-221c-448e-9d00-366dafd2a73a,DISK]) is bad. 2024-11-19T04:56:06,227 WARN [DataStreamer for file /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992165353 block BP-1239689047-172.17.0.2-1731992142713:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1239689047-172.17.0.2-1731992142713:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:56:06,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43143 is added to blk_1073741839_1022 (size=85) 2024-11-19T04:56:06,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40403 is added to blk_1073741839_1022 (size=85) 2024-11-19T04:56:06,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:56:07,110 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:07,225 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-19T04:56:07,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:07,353 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992157961 after 4001ms 2024-11-19T04:56:08,111 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:08,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:09,111 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:09,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:09,392 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992165353 after 4001ms 2024-11-19T04:56:09,392 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992165353 2024-11-19T04:56:09,397 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992165353 2024-11-19T04:56:09,397 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-19T04:56:09,397 ERROR [FSHLog-0-hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808-prefix:08a7f35e60d4,45575,1731992143611.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43811,DS-5f22df2c-221c-448e-9d00-366dafd2a73a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:56:09,397 WARN [FSHLog-0-hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808-prefix:08a7f35e60d4,45575,1731992143611.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43811,DS-5f22df2c-221c-448e-9d00-366dafd2a73a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:56:09,398 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 08a7f35e60d4%2C45575%2C1731992143611.meta:.meta(num 1731992144388) roll requested 2024-11-19T04:56:09,398 INFO [regionserver/08a7f35e60d4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C45575%2C1731992143611.meta.1731992169398.meta 2024-11-19T04:56:09,409 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:09,409 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:09,409 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:09,409 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:09,410 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:09,410 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.meta.1731992144388.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.meta.1731992169398.meta 2024-11-19T04:56:09,410 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43811,DS-5f22df2c-221c-448e-9d00-366dafd2a73a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:56:09,410 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43811,DS-5f22df2c-221c-448e-9d00-366dafd2a73a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:56:09,410 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.meta.1731992144388.meta 2024-11-19T04:56:09,411 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32961:32961),(127.0.0.1/127.0.0.1:45245:45245)] 2024-11-19T04:56:09,411 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.meta.1731992144388.meta is not closed yet, will try archiving it next time 2024-11-19T04:56:09,411 WARN [IPC Server handler 1 on default port 42601 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.meta.1731992144388.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1014 2024-11-19T04:56:09,411 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.meta.1731992144388.meta after 1ms 2024-11-19T04:56:09,429 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/data/hbase/meta/1588230740/.tmp/info/8d36cd94f1e844ea9ba6f8d41a147c5c is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731992144555.136e0d74fa1d678f94528dcfc60180c9./info:regioninfo/1731992144912/Put/seqid=0 2024-11-19T04:56:09,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43143 is added to blk_1073741841_1025 (size=7125) 2024-11-19T04:56:09,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40403 is added to blk_1073741841_1025 (size=7125) 2024-11-19T04:56:09,443 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/data/hbase/meta/1588230740/.tmp/info/8d36cd94f1e844ea9ba6f8d41a147c5c 2024-11-19T04:56:09,465 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/data/hbase/meta/1588230740/.tmp/ns/ce99cfc41930408dba099d18ebf8cab3 is 43, key is default/ns:d/1731992144429/Put/seqid=0 2024-11-19T04:56:09,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:40403 is added to blk_1073741842_1026 (size=5153) 2024-11-19T04:56:09,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43143 is added to blk_1073741842_1026 (size=5153) 2024-11-19T04:56:09,471 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/data/hbase/meta/1588230740/.tmp/ns/ce99cfc41930408dba099d18ebf8cab3 2024-11-19T04:56:09,498 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/data/hbase/meta/1588230740/.tmp/table/1de67c2ccd5e4441886600a18d0d81ca is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731992144923/Put/seqid=0 2024-11-19T04:56:09,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40403 is added to blk_1073741843_1027 (size=5438) 2024-11-19T04:56:09,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43143 is added to blk_1073741843_1027 (size=5438) 2024-11-19T04:56:09,506 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/data/hbase/meta/1588230740/.tmp/table/1de67c2ccd5e4441886600a18d0d81ca 2024-11-19T04:56:09,513 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/data/hbase/meta/1588230740/.tmp/info/8d36cd94f1e844ea9ba6f8d41a147c5c as hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/data/hbase/meta/1588230740/info/8d36cd94f1e844ea9ba6f8d41a147c5c 2024-11-19T04:56:09,520 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/data/hbase/meta/1588230740/info/8d36cd94f1e844ea9ba6f8d41a147c5c, entries=10, sequenceid=11, filesize=7.0 K 2024-11-19T04:56:09,522 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/data/hbase/meta/1588230740/.tmp/ns/ce99cfc41930408dba099d18ebf8cab3 as hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/data/hbase/meta/1588230740/ns/ce99cfc41930408dba099d18ebf8cab3 2024-11-19T04:56:09,528 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/data/hbase/meta/1588230740/ns/ce99cfc41930408dba099d18ebf8cab3, entries=2, sequenceid=11, filesize=5.0 K 2024-11-19T04:56:09,530 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/data/hbase/meta/1588230740/.tmp/table/1de67c2ccd5e4441886600a18d0d81ca as hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/data/hbase/meta/1588230740/table/1de67c2ccd5e4441886600a18d0d81ca 2024-11-19T04:56:09,536 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/data/hbase/meta/1588230740/table/1de67c2ccd5e4441886600a18d0d81ca, entries=2, sequenceid=11, filesize=5.3 K 2024-11-19T04:56:09,537 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 140ms, sequenceid=11, compaction requested=false 2024-11-19T04:56:09,538 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-19T04:56:09,538 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 136e0d74fa1d678f94528dcfc60180c9 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-19T04:56:09,538 ERROR [FSHLog-0-hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808-prefix:08a7f35e60d4,45575,1731992143611 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1239689047-172.17.0.2-1731992142713:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:56:09,539 WARN [FSHLog-0-hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808-prefix:08a7f35e60d4,45575,1731992143611 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1239689047-172.17.0.2-1731992142713:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:56:09,539 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 08a7f35e60d4%2C45575%2C1731992143611:(num 1731992165353) roll requested 2024-11-19T04:56:09,539 INFO [regionserver/08a7f35e60d4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C45575%2C1731992143611.1731992169539 2024-11-19T04:56:09,545 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992165353 newFile=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992169539 2024-11-19T04:56:09,545 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:09,545 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:09,546 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:09,546 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:09,546 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:09,546 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992165353 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992169539 2024-11-19T04:56:09,546 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1239689047-172.17.0.2-1731992142713:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T04:56:09,547 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45245:45245),(127.0.0.1/127.0.0.1:32961:32961)] 2024-11-19T04:56:09,547 DEBUG [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992165353 is not closed yet, will try archiving it next time 2024-11-19T04:56:09,546 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1239689047-172.17.0.2-1731992142713:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:56:09,547 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992165353 2024-11-19T04:56:09,547 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992165353 after 0ms 2024-11-19T04:56:09,548 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.1731992165353 to hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/oldWALs/08a7f35e60d4%2C45575%2C1731992143611.1731992165353 2024-11-19T04:56:09,564 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/data/default/TestLogRolling-testLogRollOnPipelineRestart/136e0d74fa1d678f94528dcfc60180c9/.tmp/info/bcfb9f87bed44941baf38893c5bf4545 is 1080, key is row1002/info:/1731992154638/Put/seqid=0 2024-11-19T04:56:09,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43143 is added to blk_1073741845_1029 (size=9270) 2024-11-19T04:56:09,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40403 is added to blk_1073741845_1029 (size=9270) 2024-11-19T04:56:09,570 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/data/default/TestLogRolling-testLogRollOnPipelineRestart/136e0d74fa1d678f94528dcfc60180c9/.tmp/info/bcfb9f87bed44941baf38893c5bf4545 2024-11-19T04:56:09,576 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/data/default/TestLogRolling-testLogRollOnPipelineRestart/136e0d74fa1d678f94528dcfc60180c9/.tmp/info/bcfb9f87bed44941baf38893c5bf4545 as hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/data/default/TestLogRolling-testLogRollOnPipelineRestart/136e0d74fa1d678f94528dcfc60180c9/info/bcfb9f87bed44941baf38893c5bf4545 2024-11-19T04:56:09,588 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/data/default/TestLogRolling-testLogRollOnPipelineRestart/136e0d74fa1d678f94528dcfc60180c9/info/bcfb9f87bed44941baf38893c5bf4545, entries=4, sequenceid=8, filesize=9.1 K 2024-11-19T04:56:09,589 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 136e0d74fa1d678f94528dcfc60180c9 in 51ms, sequenceid=8, compaction requested=false 2024-11-19T04:56:09,589 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status 
journal for 136e0d74fa1d678f94528dcfc60180c9: 2024-11-19T04:56:09,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T04:56:09,595 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-19T04:56:09,596 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T04:56:09,596 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:56:09,596 DEBUG [Time-limited test {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:56:09,596 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-19T04:56:09,596 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T04:56:09,596 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=531908702, stopped=false 2024-11-19T04:56:09,596 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=08a7f35e60d4,42521,1731992143563 2024-11-19T04:56:09,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45575-0x1012e94b14c0001, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T04:56:09,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45575-0x1012e94b14c0001, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:56:09,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42521-0x1012e94b14c0000, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T04:56:09,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42521-0x1012e94b14c0000, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:56:09,598 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T04:56:09,598 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-19T04:56:09,598 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T04:56:09,598 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:56:09,599 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '08a7f35e60d4,45575,1731992143611' ***** 2024-11-19T04:56:09,599 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T04:56:09,599 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42521-0x1012e94b14c0000, quorum=127.0.0.1:52010, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T04:56:09,599 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45575-0x1012e94b14c0001, quorum=127.0.0.1:52010, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T04:56:09,599 INFO [RS:0;08a7f35e60d4:45575 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T04:56:09,599 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T04:56:09,599 INFO [RS:0;08a7f35e60d4:45575 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T04:56:09,599 INFO [RS:0;08a7f35e60d4:45575 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T04:56:09,600 INFO [RS:0;08a7f35e60d4:45575 {}] regionserver.HRegionServer(3091): Received CLOSE for 136e0d74fa1d678f94528dcfc60180c9 2024-11-19T04:56:09,600 INFO [RS:0;08a7f35e60d4:45575 {}] regionserver.HRegionServer(959): stopping server 08a7f35e60d4,45575,1731992143611 2024-11-19T04:56:09,600 INFO [RS:0;08a7f35e60d4:45575 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T04:56:09,600 INFO [RS:0;08a7f35e60d4:45575 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;08a7f35e60d4:45575. 2024-11-19T04:56:09,600 DEBUG [RS:0;08a7f35e60d4:45575 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T04:56:09,600 DEBUG [RS:0;08a7f35e60d4:45575 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:56:09,600 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 136e0d74fa1d678f94528dcfc60180c9, disabling compactions & flushes 2024-11-19T04:56:09,600 INFO 
[RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731992144555.136e0d74fa1d678f94528dcfc60180c9. 2024-11-19T04:56:09,600 INFO [RS:0;08a7f35e60d4:45575 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T04:56:09,600 INFO [RS:0;08a7f35e60d4:45575 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T04:56:09,600 INFO [RS:0;08a7f35e60d4:45575 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-19T04:56:09,600 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731992144555.136e0d74fa1d678f94528dcfc60180c9. 2024-11-19T04:56:09,600 INFO [RS:0;08a7f35e60d4:45575 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T04:56:09,600 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731992144555.136e0d74fa1d678f94528dcfc60180c9. after waiting 0 ms 2024-11-19T04:56:09,600 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731992144555.136e0d74fa1d678f94528dcfc60180c9. 2024-11-19T04:56:09,600 INFO [RS:0;08a7f35e60d4:45575 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-19T04:56:09,600 DEBUG [RS:0;08a7f35e60d4:45575 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 136e0d74fa1d678f94528dcfc60180c9=TestLogRolling-testLogRollOnPipelineRestart,,1731992144555.136e0d74fa1d678f94528dcfc60180c9.} 2024-11-19T04:56:09,600 DEBUG [RS:0;08a7f35e60d4:45575 {}] regionserver.HRegionServer(1351): Waiting on 136e0d74fa1d678f94528dcfc60180c9, 1588230740 2024-11-19T04:56:09,601 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T04:56:09,601 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T04:56:09,601 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T04:56:09,601 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T04:56:09,601 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T04:56:09,609 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/data/default/TestLogRolling-testLogRollOnPipelineRestart/136e0d74fa1d678f94528dcfc60180c9/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-19T04:56:09,609 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-19T04:56:09,610 INFO [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731992144555.136e0d74fa1d678f94528dcfc60180c9. 2024-11-19T04:56:09,610 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 136e0d74fa1d678f94528dcfc60180c9: Waiting for close lock at 1731992169600Running coprocessor pre-close hooks at 1731992169600Disabling compacts and flushes for region at 1731992169600Disabling writes for close at 1731992169600Writing region close event to WAL at 1731992169604 (+4 ms)Running coprocessor post-close hooks at 1731992169610 (+6 ms)Closed at 1731992169610 2024-11-19T04:56:09,610 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T04:56:09,610 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T04:56:09,610 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731992144555.136e0d74fa1d678f94528dcfc60180c9. 2024-11-19T04:56:09,610 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731992169600Running coprocessor pre-close hooks at 1731992169600Disabling compacts and flushes for region at 1731992169600Disabling writes for close at 1731992169601 (+1 ms)Writing region close event to WAL at 1731992169606 (+5 ms)Running coprocessor post-close hooks at 1731992169610 (+4 ms)Closed at 1731992169610 2024-11-19T04:56:09,610 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-19T04:56:09,801 INFO [RS:0;08a7f35e60d4:45575 {}] regionserver.HRegionServer(976): stopping server 08a7f35e60d4,45575,1731992143611; all regions closed. 
2024-11-19T04:56:09,801 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:09,801 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:09,802 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:09,802 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:09,802 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:09,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40403 is added to blk_1073741840_1023 (size=825) 2024-11-19T04:56:09,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43143 is added to blk_1073741840_1023 (size=825) 2024-11-19T04:56:09,869 INFO [regionserver/08a7f35e60d4:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T04:56:09,869 INFO [regionserver/08a7f35e60d4:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-19T04:56:09,869 INFO [regionserver/08a7f35e60d4:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-19T04:56:10,112 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:56:10,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:11,113 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:11,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:56:12,113 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:12,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:13,114 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:56:13,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:56:13,412 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.meta.1731992144388.meta after 4002ms 2024-11-19T04:56:13,412 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/WALs/08a7f35e60d4,45575,1731992143611/08a7f35e60d4%2C45575%2C1731992143611.meta.1731992144388.meta to hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/oldWALs/08a7f35e60d4%2C45575%2C1731992143611.meta.1731992144388.meta 2024-11-19T04:56:13,415 DEBUG [RS:0;08a7f35e60d4:45575 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/oldWALs 2024-11-19T04:56:13,415 INFO [RS:0;08a7f35e60d4:45575 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 08a7f35e60d4%2C45575%2C1731992143611.meta:.meta(num 1731992169398) 2024-11-19T04:56:13,416 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:13,416 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:13,416 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:13,416 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:13,416 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:13,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43143 is added to blk_1073741844_1028 (size=1162) 2024-11-19T04:56:13,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40403 is added to blk_1073741844_1028 (size=1162) 2024-11-19T04:56:13,423 DEBUG [RS:0;08a7f35e60d4:45575 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/oldWALs 2024-11-19T04:56:13,424 INFO [RS:0;08a7f35e60d4:45575 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 08a7f35e60d4%2C45575%2C1731992143611:(num 1731992169539) 2024-11-19T04:56:13,424 DEBUG [RS:0;08a7f35e60d4:45575 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:56:13,424 INFO [RS:0;08a7f35e60d4:45575 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T04:56:13,424 INFO [RS:0;08a7f35e60d4:45575 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T04:56:13,424 INFO [RS:0;08a7f35e60d4:45575 {}] hbase.ChoreService(370): Chore service for: regionserver/08a7f35e60d4:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-19T04:56:13,424 INFO [RS:0;08a7f35e60d4:45575 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T04:56:13,424 INFO [RS:0;08a7f35e60d4:45575 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45575 2024-11-19T04:56:13,425 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-19T04:56:13,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42521-0x1012e94b14c0000, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T04:56:13,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45575-0x1012e94b14c0001, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/08a7f35e60d4,45575,1731992143611 2024-11-19T04:56:13,426 INFO [RS:0;08a7f35e60d4:45575 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T04:56:13,426 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-19T04:56:13,426 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$364/0x00007f0f68903a08@1b0fb0ae rejected from java.util.concurrent.ThreadPoolExecutor@5e0d3dab[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] 
at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-11-19T04:56:13,427 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [08a7f35e60d4,45575,1731992143611] 2024-11-19T04:56:13,429 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/08a7f35e60d4,45575,1731992143611 already deleted, retry=false 2024-11-19T04:56:13,430 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 08a7f35e60d4,45575,1731992143611 expired; onlineServers=0 2024-11-19T04:56:13,430 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '08a7f35e60d4,42521,1731992143563' ***** 2024-11-19T04:56:13,430 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T04:56:13,430 INFO [M:0;08a7f35e60d4:42521 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T04:56:13,430 INFO [M:0;08a7f35e60d4:42521 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T04:56:13,430 DEBUG [M:0;08a7f35e60d4:42521 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T04:56:13,430 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-19T04:56:13,430 DEBUG [M:0;08a7f35e60d4:42521 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T04:56:13,430 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.small.0-1731992143780 {}] cleaner.HFileCleaner(306): Exit Thread[master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.small.0-1731992143780,5,FailOnTimeoutGroup] 2024-11-19T04:56:13,430 INFO [M:0;08a7f35e60d4:42521 {}] hbase.ChoreService(370): Chore service for: master/08a7f35e60d4:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T04:56:13,430 INFO [M:0;08a7f35e60d4:42521 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T04:56:13,430 DEBUG [M:0;08a7f35e60d4:42521 {}] master.HMaster(1795): Stopping service threads 2024-11-19T04:56:13,430 INFO [M:0;08a7f35e60d4:42521 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T04:56:13,431 INFO [M:0;08a7f35e60d4:42521 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T04:56:13,431 INFO [M:0;08a7f35e60d4:42521 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T04:56:13,431 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-19T04:56:13,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42521-0x1012e94b14c0000, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T04:56:13,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42521-0x1012e94b14c0000, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:56:13,432 DEBUG [M:0;08a7f35e60d4:42521 {}] zookeeper.ZKUtil(347): master:42521-0x1012e94b14c0000, quorum=127.0.0.1:52010, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T04:56:13,432 WARN [M:0;08a7f35e60d4:42521 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T04:56:13,432 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.large.0-1731992143780 {}] cleaner.HFileCleaner(306): Exit Thread[master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.large.0-1731992143780,5,FailOnTimeoutGroup] 2024-11-19T04:56:13,432 INFO [M:0;08a7f35e60d4:42521 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/.lastflushedseqids 2024-11-19T04:56:13,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40403 is added to blk_1073741846_1030 (size=120) 2024-11-19T04:56:13,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43143 is added to blk_1073741846_1030 (size=120) 2024-11-19T04:56:13,439 INFO [M:0;08a7f35e60d4:42521 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T04:56:13,439 INFO [M:0;08a7f35e60d4:42521 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T04:56:13,439 DEBUG [M:0;08a7f35e60d4:42521 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T04:56:13,439 INFO [M:0;08a7f35e60d4:42521 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:56:13,439 DEBUG [M:0;08a7f35e60d4:42521 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:56:13,439 DEBUG [M:0;08a7f35e60d4:42521 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T04:56:13,439 DEBUG [M:0;08a7f35e60d4:42521 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:56:13,440 INFO [M:0;08a7f35e60d4:42521 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.18 KB heapSize=29.16 KB 2024-11-19T04:56:13,440 ERROR [FSHLog-0-hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData-prefix:08a7f35e60d4,42521,1731992143563 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43811,DS-5f22df2c-221c-448e-9d00-366dafd2a73a,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:56:13,440 WARN [FSHLog-0-hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData-prefix:08a7f35e60d4,42521,1731992143563 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43811,DS-5f22df2c-221c-448e-9d00-366dafd2a73a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:56:13,440 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 08a7f35e60d4%2C42521%2C1731992143563:(num 1731992143702) roll requested 2024-11-19T04:56:13,440 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C42521%2C1731992143563.1731992173440 2024-11-19T04:56:13,446 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:13,446 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:13,446 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:13,446 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:13,446 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:13,446 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/WALs/08a7f35e60d4,42521,1731992143563/08a7f35e60d4%2C42521%2C1731992143563.1731992143702 with entries=53, filesize=26.63 KB; new WAL /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/WALs/08a7f35e60d4,42521,1731992143563/08a7f35e60d4%2C42521%2C1731992143563.1731992173440 2024-11-19T04:56:13,447 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43811,DS-5f22df2c-221c-448e-9d00-366dafd2a73a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:56:13,447 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43811,DS-5f22df2c-221c-448e-9d00-366dafd2a73a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T04:56:13,447 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/WALs/08a7f35e60d4,42521,1731992143563/08a7f35e60d4%2C42521%2C1731992143563.1731992143702 2024-11-19T04:56:13,447 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32961:32961),(127.0.0.1/127.0.0.1:45245:45245)] 2024-11-19T04:56:13,447 WARN [IPC Server handler 4 on default port 42601 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/WALs/08a7f35e60d4,42521,1731992143563/08a7f35e60d4%2C42521%2C1731992143563.1731992143702 has not been closed. Lease recovery is in progress. 
RecoveryId = 1032 for block blk_1073741830_1013 2024-11-19T04:56:13,448 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/WALs/08a7f35e60d4,42521,1731992143563/08a7f35e60d4%2C42521%2C1731992143563.1731992143702 is not closed yet, will try archiving it next time 2024-11-19T04:56:13,448 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/WALs/08a7f35e60d4,42521,1731992143563/08a7f35e60d4%2C42521%2C1731992143563.1731992143702 after 1ms 2024-11-19T04:56:13,464 DEBUG [M:0;08a7f35e60d4:42521 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ba5bfc2f191e43e2afa57431031fd009 is 82, key is hbase:meta,,1/info:regioninfo/1731992144412/Put/seqid=0 2024-11-19T04:56:13,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43143 is added to blk_1073741848_1033 (size=5672) 2024-11-19T04:56:13,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40403 is added to blk_1073741848_1033 (size=5672) 2024-11-19T04:56:13,471 INFO [M:0;08a7f35e60d4:42521 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ba5bfc2f191e43e2afa57431031fd009 2024-11-19T04:56:13,496 DEBUG [M:0;08a7f35e60d4:42521 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ddb704e0a7b44845b463b953f60656ef is 779, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731992144927/Put/seqid=0 2024-11-19T04:56:13,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40403 is added to blk_1073741849_1034 (size=6119) 2024-11-19T04:56:13,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43143 is added to blk_1073741849_1034 (size=6119) 2024-11-19T04:56:13,502 INFO [M:0;08a7f35e60d4:42521 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ddb704e0a7b44845b463b953f60656ef 2024-11-19T04:56:13,524 DEBUG [M:0;08a7f35e60d4:42521 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/182d53911d8e4627b5045f9edc9adf36 is 69, key is 08a7f35e60d4,45575,1731992143611/rs:state/1731992143853/Put/seqid=0 2024-11-19T04:56:13,528 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45575-0x1012e94b14c0001, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T04:56:13,528 INFO [RS:0;08a7f35e60d4:45575 {}] hbase.HBaseServerBase(486): 
Close table descriptors 2024-11-19T04:56:13,528 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45575-0x1012e94b14c0001, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T04:56:13,528 INFO [RS:0;08a7f35e60d4:45575 {}] regionserver.HRegionServer(1031): Exiting; stopping=08a7f35e60d4,45575,1731992143611; zookeeper connection closed. 2024-11-19T04:56:13,529 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1da45224 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1da45224 2024-11-19T04:56:13,529 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-19T04:56:13,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43143 is added to blk_1073741850_1035 (size=5156) 2024-11-19T04:56:13,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40403 is added to blk_1073741850_1035 (size=5156) 2024-11-19T04:56:13,531 INFO [M:0;08a7f35e60d4:42521 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/182d53911d8e4627b5045f9edc9adf36 2024-11-19T04:56:13,546 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-19T04:56:13,552 DEBUG [M:0;08a7f35e60d4:42521 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/816b4a954987418ba4791f3a93cb1580 is 52, key is load_balancer_on/state:d/1731992144551/Put/seqid=0 2024-11-19T04:56:13,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43143 is added to blk_1073741851_1036 (size=5056) 2024-11-19T04:56:13,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40403 is added to blk_1073741851_1036 (size=5056) 2024-11-19T04:56:13,558 INFO [M:0;08a7f35e60d4:42521 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/816b4a954987418ba4791f3a93cb1580 2024-11-19T04:56:13,564 DEBUG [M:0;08a7f35e60d4:42521 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ba5bfc2f191e43e2afa57431031fd009 as hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ba5bfc2f191e43e2afa57431031fd009 2024-11-19T04:56:13,569 INFO [M:0;08a7f35e60d4:42521 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ba5bfc2f191e43e2afa57431031fd009, entries=8, sequenceid=56, filesize=5.5 K 2024-11-19T04:56:13,570 DEBUG [M:0;08a7f35e60d4:42521 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ddb704e0a7b44845b463b953f60656ef as hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ddb704e0a7b44845b463b953f60656ef 2024-11-19T04:56:13,575 INFO [M:0;08a7f35e60d4:42521 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ddb704e0a7b44845b463b953f60656ef, entries=6, sequenceid=56, filesize=6.0 K 2024-11-19T04:56:13,576 DEBUG [M:0;08a7f35e60d4:42521 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/182d53911d8e4627b5045f9edc9adf36 as hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/182d53911d8e4627b5045f9edc9adf36 2024-11-19T04:56:13,581 INFO [M:0;08a7f35e60d4:42521 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/182d53911d8e4627b5045f9edc9adf36, entries=1, sequenceid=56, filesize=5.0 K 2024-11-19T04:56:13,582 DEBUG [M:0;08a7f35e60d4:42521 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/816b4a954987418ba4791f3a93cb1580 as hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/816b4a954987418ba4791f3a93cb1580 2024-11-19T04:56:13,587 INFO [M:0;08a7f35e60d4:42521 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/816b4a954987418ba4791f3a93cb1580, entries=1, sequenceid=56, filesize=4.9 K 2024-11-19T04:56:13,588 INFO [M:0;08a7f35e60d4:42521 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 149ms, sequenceid=56, compaction requested=false 2024-11-19T04:56:13,590 INFO [M:0;08a7f35e60d4:42521 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T04:56:13,590 DEBUG [M:0;08a7f35e60d4:42521 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731992173439Disabling compacts and flushes for region at 1731992173439Disabling writes for close at 1731992173439Obtaining lock to block concurrent updates at 1731992173440 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731992173440Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23738, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1731992173440Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731992173448 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731992173448Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731992173464 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731992173464Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731992173476 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731992173495 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731992173495Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731992173507 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731992173524 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731992173524Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731992173536 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731992173551 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731992173551Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7cf23326: reopening flushed file at 1731992173563 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1f18490f: reopening flushed file at 1731992173570 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2846e615: reopening flushed file at 1731992173575 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@79c7f787: reopening flushed file at 1731992173581 (+6 ms)Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 149ms, sequenceid=56, compaction requested=false at 1731992173588 (+7 ms)Writing region close event to WAL at 1731992173590 (+2 ms)Closed at 1731992173590 2024-11-19T04:56:13,590 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:13,590 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:13,590 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:13,591 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:13,591 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:13,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40403 is added to blk_1073741847_1031 (size=757) 2024-11-19T04:56:13,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43143 is added to blk_1073741847_1031 (size=757) 2024-11-19T04:56:14,115 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:14,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:14,628 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:14,628 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:14,628 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:14,628 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:14,628 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:14,629 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:14,632 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:14,632 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:14,632 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:14,635 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:14,638 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:14,639 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:15,115 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:15,141 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T04:56:15,143 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:15,143 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:15,163 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:15,164 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:15,164 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:15,164 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:15,165 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:15,165 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:15,168 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:15,168 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:15,169 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:15,171 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:15,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:16,116 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:16,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:16,427 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-19T04:56:17,116 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:56:17,146 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T04:56:17,146 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T04:56:17,147 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-19T04:56:17,147 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-19T04:56:17,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:56:17,449 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/WALs/08a7f35e60d4,42521,1731992143563/08a7f35e60d4%2C42521%2C1731992143563.1731992143702 after 4001ms 2024-11-19T04:56:17,449 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/WALs/08a7f35e60d4,42521,1731992143563/08a7f35e60d4%2C42521%2C1731992143563.1731992143702 to hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/oldWALs/08a7f35e60d4%2C42521%2C1731992143563.1731992143702 2024-11-19T04:56:17,453 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/MasterData/oldWALs/08a7f35e60d4%2C42521%2C1731992143563.1731992143702 to hdfs://localhost:42601/user/jenkins/test-data/b1366704-9265-e2d7-3c55-32b2d98b2808/oldWALs/08a7f35e60d4%2C42521%2C1731992143563.1731992143702$masterlocalwal$ 2024-11-19T04:56:17,453 INFO [M:0;08a7f35e60d4:42521 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-19T04:56:17,453 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T04:56:17,453 INFO [M:0;08a7f35e60d4:42521 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42521 2024-11-19T04:56:17,453 INFO [M:0;08a7f35e60d4:42521 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T04:56:17,555 INFO [M:0;08a7f35e60d4:42521 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T04:56:17,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42521-0x1012e94b14c0000, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T04:56:17,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42521-0x1012e94b14c0000, quorum=127.0.0.1:52010, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T04:56:17,558 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@528eeea6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:56:17,558 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@20466b6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T04:56:17,558 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T04:56:17,559 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2026736d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T04:56:17,559 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@760c54cf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/hadoop.log.dir/,STOPPED} 2024-11-19T04:56:17,560 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T04:56:17,560 WARN [BP-1239689047-172.17.0.2-1731992142713 heartbeating to localhost/127.0.0.1:42601 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T04:56:17,560 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T04:56:17,560 WARN [BP-1239689047-172.17.0.2-1731992142713 heartbeating to localhost/127.0.0.1:42601 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1239689047-172.17.0.2-1731992142713 (Datanode Uuid 498e3122-0852-427c-9e8a-ef967a03db89) service to localhost/127.0.0.1:42601 2024-11-19T04:56:17,561 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/cluster_14c23cca-1b11-3138-a135-f4ba00ba23c7/data/data3/current/BP-1239689047-172.17.0.2-1731992142713 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:56:17,561 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/cluster_14c23cca-1b11-3138-a135-f4ba00ba23c7/data/data4/current/BP-1239689047-172.17.0.2-1731992142713 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:56:17,562 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T04:56:17,564 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2ca8564b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:56:17,564 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@fa662a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T04:56:17,564 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T04:56:17,564 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@167fd01b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T04:56:17,564 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@718ea2f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/hadoop.log.dir/,STOPPED} 2024-11-19T04:56:17,565 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T04:56:17,565 WARN [BP-1239689047-172.17.0.2-1731992142713 heartbeating to localhost/127.0.0.1:42601 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T04:56:17,565 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T04:56:17,565 WARN [BP-1239689047-172.17.0.2-1731992142713 heartbeating to localhost/127.0.0.1:42601 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1239689047-172.17.0.2-1731992142713 (Datanode Uuid 2717070f-ca22-4f46-80fb-eee7927e4ef3) service to localhost/127.0.0.1:42601 2024-11-19T04:56:17,566 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/cluster_14c23cca-1b11-3138-a135-f4ba00ba23c7/data/data1/current/BP-1239689047-172.17.0.2-1731992142713 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:56:17,566 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/cluster_14c23cca-1b11-3138-a135-f4ba00ba23c7/data/data2/current/BP-1239689047-172.17.0.2-1731992142713 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:56:17,566 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T04:56:17,572 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1915705e{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T04:56:17,573 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@17b2a9ba{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T04:56:17,573 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T04:56:17,573 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@27a49013{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T04:56:17,573 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7bd03e52{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/hadoop.log.dir/,STOPPED} 2024-11-19T04:56:17,580 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T04:56:17,598 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T04:56:17,611 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=182 (was 157) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42601 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42601 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42601 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:42601 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42601 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42601 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42601 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42601 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=457 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=190 (was 166) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=11549 (was 12138) 2024-11-19T04:56:17,619 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=181, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=190, ProcessCount=11, AvailableMemoryMB=11549 2024-11-19T04:56:17,620 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T04:56:17,620 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/hadoop.log.dir so I do NOT create it in target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77 2024-11-19T04:56:17,620 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e596ca72-928d-9862-b8bc-f227563f33c6/hadoop.tmp.dir so I do NOT create it in target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77 2024-11-19T04:56:17,620 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/cluster_ad7551c8-4c43-5df4-4828-94fc57682521, deleteOnExit=true 2024-11-19T04:56:17,620 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T04:56:17,620 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/test.cache.data in system properties and HBase conf 2024-11-19T04:56:17,620 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T04:56:17,620 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/hadoop.log.dir in system properties and HBase conf 2024-11-19T04:56:17,620 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T04:56:17,621 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T04:56:17,621 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T04:56:17,621 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-19T04:56:17,621 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T04:56:17,621 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T04:56:17,621 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T04:56:17,621 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T04:56:17,621 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T04:56:17,621 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T04:56:17,621 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T04:56:17,622 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T04:56:17,622 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T04:56:17,622 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/nfs.dump.dir in system properties and HBase conf 2024-11-19T04:56:17,622 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/java.io.tmpdir in system properties and HBase conf 2024-11-19T04:56:17,622 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T04:56:17,622 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T04:56:17,622 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T04:56:17,637 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T04:56:17,719 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:56:17,723 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T04:56:17,725 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T04:56:17,725 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T04:56:17,725 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T04:56:17,726 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:56:17,726 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7a7d1da5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/hadoop.log.dir/,AVAILABLE} 2024-11-19T04:56:17,727 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@438136f2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T04:56:17,845 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@16208fe2{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/java.io.tmpdir/jetty-localhost-41223-hadoop-hdfs-3_4_1-tests_jar-_-any-9699456479666712517/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T04:56:17,846 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4a5372a5{HTTP/1.1, (http/1.1)}{localhost:41223} 2024-11-19T04:56:17,846 INFO [Time-limited test {}] server.Server(415): Started @185890ms 2024-11-19T04:56:17,859 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T04:56:17,930 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:56:17,933 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T04:56:17,934 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T04:56:17,934 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T04:56:17,934 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T04:56:17,935 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@214c2124{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/hadoop.log.dir/,AVAILABLE} 2024-11-19T04:56:17,935 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@f02078{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T04:56:18,057 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4114613b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/java.io.tmpdir/jetty-localhost-43911-hadoop-hdfs-3_4_1-tests_jar-_-any-15611236995736518574/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:56:18,058 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@21edc7fd{HTTP/1.1, (http/1.1)}{localhost:43911} 2024-11-19T04:56:18,058 INFO [Time-limited test {}] server.Server(415): Started @186102ms 2024-11-19T04:56:18,059 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T04:56:18,089 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:56:18,092 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T04:56:18,093 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T04:56:18,093 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T04:56:18,093 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T04:56:18,093 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e470e04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/hadoop.log.dir/,AVAILABLE} 2024-11-19T04:56:18,094 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4732430a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T04:56:18,117 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] 
... 11 more 2024-11-19T04:56:18,175 WARN [Thread-1628 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/cluster_ad7551c8-4c43-5df4-4828-94fc57682521/data/data1/current/BP-417346895-172.17.0.2-1731992177654/current, will proceed with Du for space computation calculation, 2024-11-19T04:56:18,175 WARN [Thread-1629 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/cluster_ad7551c8-4c43-5df4-4828-94fc57682521/data/data2/current/BP-417346895-172.17.0.2-1731992177654/current, will proceed with Du for space computation calculation, 2024-11-19T04:56:18,193 WARN [Thread-1607 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T04:56:18,196 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x98011ef858a6bdf6 with lease ID 0x5aa5f789df9e2741: Processing first storage report for DS-80265583-de34-440f-9b02-21b074a469ac from datanode DatanodeRegistration(127.0.0.1:39585, datanodeUuid=31e542b5-9bd7-4c78-9bf5-3902c20471cb, infoPort=34939, infoSecurePort=0, ipcPort=43767, storageInfo=lv=-57;cid=testClusterID;nsid=1924067336;c=1731992177654) 2024-11-19T04:56:18,196 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x98011ef858a6bdf6 with lease ID 0x5aa5f789df9e2741: from storage DS-80265583-de34-440f-9b02-21b074a469ac node DatanodeRegistration(127.0.0.1:39585, datanodeUuid=31e542b5-9bd7-4c78-9bf5-3902c20471cb, infoPort=34939, infoSecurePort=0, ipcPort=43767, storageInfo=lv=-57;cid=testClusterID;nsid=1924067336;c=1731992177654), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:56:18,196 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x98011ef858a6bdf6 with lease ID 0x5aa5f789df9e2741: Processing first storage report for DS-72cb5d56-a56a-4070-b6a6-b7f85a69a5d6 from datanode DatanodeRegistration(127.0.0.1:39585, datanodeUuid=31e542b5-9bd7-4c78-9bf5-3902c20471cb, infoPort=34939, infoSecurePort=0, ipcPort=43767, storageInfo=lv=-57;cid=testClusterID;nsid=1924067336;c=1731992177654) 2024-11-19T04:56:18,196 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x98011ef858a6bdf6 with lease ID 0x5aa5f789df9e2741: from storage DS-72cb5d56-a56a-4070-b6a6-b7f85a69a5d6 node DatanodeRegistration(127.0.0.1:39585, datanodeUuid=31e542b5-9bd7-4c78-9bf5-3902c20471cb, infoPort=34939, infoSecurePort=0, ipcPort=43767, storageInfo=lv=-57;cid=testClusterID;nsid=1924067336;c=1731992177654), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:56:18,224 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@677a249b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/java.io.tmpdir/jetty-localhost-36865-hadoop-hdfs-3_4_1-tests_jar-_-any-17125959137076912569/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:56:18,224 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@e21aaf2{HTTP/1.1, (http/1.1)}{localhost:36865} 2024-11-19T04:56:18,225 INFO [Time-limited test {}] server.Server(415): Started @186269ms 2024-11-19T04:56:18,226 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T04:56:18,337 WARN [Thread-1654 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/cluster_ad7551c8-4c43-5df4-4828-94fc57682521/data/data3/current/BP-417346895-172.17.0.2-1731992177654/current, will proceed with Du for space computation calculation, 2024-11-19T04:56:18,337 WARN [Thread-1655 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/cluster_ad7551c8-4c43-5df4-4828-94fc57682521/data/data4/current/BP-417346895-172.17.0.2-1731992177654/current, will proceed with Du for space computation calculation, 2024-11-19T04:56:18,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:56:18,355 WARN [Thread-1643 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T04:56:18,357 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf3ba3086bc2e8e93 with lease ID 0x5aa5f789df9e2742: Processing first storage report for DS-7c3f0cb2-4200-40ef-b4a9-12a3adf8a688 from datanode DatanodeRegistration(127.0.0.1:42689, datanodeUuid=6b711a01-e1a6-4dbd-8f74-a527a7839e4e, infoPort=44689, infoSecurePort=0, ipcPort=41469, storageInfo=lv=-57;cid=testClusterID;nsid=1924067336;c=1731992177654) 2024-11-19T04:56:18,358 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf3ba3086bc2e8e93 with lease ID 0x5aa5f789df9e2742: from storage DS-7c3f0cb2-4200-40ef-b4a9-12a3adf8a688 node DatanodeRegistration(127.0.0.1:42689, datanodeUuid=6b711a01-e1a6-4dbd-8f74-a527a7839e4e, infoPort=44689, infoSecurePort=0, ipcPort=41469, storageInfo=lv=-57;cid=testClusterID;nsid=1924067336;c=1731992177654), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:56:18,358 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf3ba3086bc2e8e93 with lease ID 0x5aa5f789df9e2742: Processing first storage report for DS-5b9a742a-26d8-4bf4-9da7-eefd5d33c7c7 from datanode DatanodeRegistration(127.0.0.1:42689, datanodeUuid=6b711a01-e1a6-4dbd-8f74-a527a7839e4e, infoPort=44689, infoSecurePort=0, ipcPort=41469, storageInfo=lv=-57;cid=testClusterID;nsid=1924067336;c=1731992177654) 2024-11-19T04:56:18,358 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf3ba3086bc2e8e93 with lease ID 0x5aa5f789df9e2742: from storage DS-5b9a742a-26d8-4bf4-9da7-eefd5d33c7c7 node DatanodeRegistration(127.0.0.1:42689, datanodeUuid=6b711a01-e1a6-4dbd-8f74-a527a7839e4e, infoPort=44689, infoSecurePort=0, ipcPort=41469, storageInfo=lv=-57;cid=testClusterID;nsid=1924067336;c=1731992177654), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:56:18,453 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77 2024-11-19T04:56:18,455 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/cluster_ad7551c8-4c43-5df4-4828-94fc57682521/zookeeper_0, clientPort=53417, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/cluster_ad7551c8-4c43-5df4-4828-94fc57682521/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/cluster_ad7551c8-4c43-5df4-4828-94fc57682521/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T04:56:18,456 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53417 2024-11-19T04:56:18,457 INFO [Time-limited test {}] fs.HFileSystem(339): Added 
intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:56:18,458 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:56:18,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741825_1001 (size=7) 2024-11-19T04:56:18,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39585 is added to blk_1073741825_1001 (size=7) 2024-11-19T04:56:18,468 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e with version=8 2024-11-19T04:56:18,468 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/hbase-staging 2024-11-19T04:56:18,470 INFO [Time-limited test {}] client.ConnectionUtils(128): master/08a7f35e60d4:0 server-side Connection retries=45 2024-11-19T04:56:18,470 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T04:56:18,470 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T04:56:18,470 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T04:56:18,470 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T04:56:18,470 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T04:56:18,470 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T04:56:18,471 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T04:56:18,471 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40549 2024-11-19T04:56:18,472 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40549 connecting to ZooKeeper ensemble=127.0.0.1:53417 2024-11-19T04:56:18,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:405490x0, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T04:56:18,480 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40549-0x1012e9539a60000 connected 2024-11-19T04:56:18,496 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block 
reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:56:18,498 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:56:18,500 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40549-0x1012e9539a60000, quorum=127.0.0.1:53417, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T04:56:18,500 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e, hbase.cluster.distributed=false 2024-11-19T04:56:18,501 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40549-0x1012e9539a60000, quorum=127.0.0.1:53417, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T04:56:18,502 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40549 2024-11-19T04:56:18,502 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40549 2024-11-19T04:56:18,502 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40549 2024-11-19T04:56:18,503 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40549 2024-11-19T04:56:18,503 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40549 2024-11-19T04:56:18,519 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/08a7f35e60d4:0 server-side Connection retries=45 2024-11-19T04:56:18,519 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T04:56:18,519 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T04:56:18,519 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T04:56:18,519 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T04:56:18,519 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T04:56:18,519 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T04:56:18,520 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T04:56:18,520 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45383 2024-11-19T04:56:18,522 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45383 connecting to ZooKeeper ensemble=127.0.0.1:53417 2024-11-19T04:56:18,522 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:56:18,524 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:56:18,528 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:453830x0, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T04:56:18,529 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:453830x0, quorum=127.0.0.1:53417, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T04:56:18,529 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45383-0x1012e9539a60001 connected 2024-11-19T04:56:18,529 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T04:56:18,530 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T04:56:18,531 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45383-0x1012e9539a60001, quorum=127.0.0.1:53417, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T04:56:18,532 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45383-0x1012e9539a60001, quorum=127.0.0.1:53417, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T04:56:18,532 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45383 2024-11-19T04:56:18,533 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45383 2024-11-19T04:56:18,533 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45383 2024-11-19T04:56:18,533 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45383 2024-11-19T04:56:18,533 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45383 2024-11-19T04:56:18,548 DEBUG [M:0;08a7f35e60d4:40549 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;08a7f35e60d4:40549 2024-11-19T04:56:18,548 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/08a7f35e60d4,40549,1731992178470 2024-11-19T04:56:18,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45383-0x1012e9539a60001, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T04:56:18,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1012e9539a60000, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
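The ZKUtil lines above ("Set watcher on znode that does not yet exist" for /hbase/running, /hbase/master and /hbase/acl) rely on ZooKeeper's ability to arm a watch on a path that has not been created yet. Below is a minimal sketch of that pattern using the plain Apache ZooKeeper client rather than HBase's ZKUtil; the quorum address is copied from the log, while the class name and timings are placeholders.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;

    public class ExistsWatchSketch {
        public static void main(String[] args) throws Exception {
            // Session against the test quorum seen in the log; any reachable ensemble works.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:53417", 30_000,
                    (WatchedEvent event) ->
                            System.out.println("event=" + event.getType() + " path=" + event.getPath()));

            // exists() with watch=true registers the default watcher even when the znode
            // is absent, so a later create of /hbase/running fires a NodeCreated event.
            if (zk.exists("/hbase/running", true) == null) {
                System.out.println("/hbase/running not there yet; watch armed");
            }

            Thread.sleep(10_000); // keep the session alive long enough to observe the event
            zk.close();
        }
    }

This is exactly why the watcher events later in the log arrive as NodeCreated notifications on paths that were absent when the watch was set.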
2024-11-19T04:56:18,550 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40549-0x1012e9539a60000, quorum=127.0.0.1:53417, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/08a7f35e60d4,40549,1731992178470
2024-11-19T04:56:18,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45383-0x1012e9539a60001, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-19T04:56:18,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1012e9539a60000, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-19T04:56:18,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45383-0x1012e9539a60001, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-19T04:56:18,552 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40549-0x1012e9539a60000, quorum=127.0.0.1:53417, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-11-19T04:56:18,553 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/08a7f35e60d4,40549,1731992178470 from backup master directory
2024-11-19T04:56:18,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45383-0x1012e9539a60001, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-19T04:56:18,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1012e9539a60000, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/08a7f35e60d4,40549,1731992178470
2024-11-19T04:56:18,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1012e9539a60000, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-19T04:56:18,556 WARN [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
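The znode traffic above, together with the "Registered as active master" line that follows, is the ephemeral-znode election pattern that ActiveMasterManager drives: whichever process creates the master znode first wins, and the node disappears with that session. The following sketch shows only the bare pattern with the plain ZooKeeper client, assuming a reachable ensemble and a root-level demo path; it is an illustration of the idea, not HBase's election code.

    import java.nio.charset.StandardCharsets;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class EphemeralElectionSketch {
        public static void main(String[] args) throws Exception {
            ZooKeeper zk = new ZooKeeper("127.0.0.1:53417", 30_000, event -> { });
            String me = "host-a,40549," + System.currentTimeMillis(); // hypothetical server name
            try {
                // Whoever creates the ephemeral node first is the active master;
                // ZooKeeper removes the node automatically when that session dies.
                zk.create("/demo-master", me.getBytes(StandardCharsets.UTF_8),
                        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
                System.out.println("won election: " + me);
            } catch (KeeperException.NodeExistsException e) {
                System.out.println("someone else is active; staying a backup");
            }
            zk.close();
        }
    }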
2024-11-19T04:56:18,556 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=08a7f35e60d4,40549,1731992178470
2024-11-19T04:56:18,560 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/hbase.id] with ID: d71c29aa-8a4b-42a0-b4fc-b8025333c5aa
2024-11-19T04:56:18,560 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/.tmp/hbase.id
2024-11-19T04:56:18,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39585 is added to blk_1073741826_1002 (size=42)
2024-11-19T04:56:18,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741826_1002 (size=42)
2024-11-19T04:56:18,567 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/.tmp/hbase.id]:[hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/hbase.id]
2024-11-19T04:56:18,579 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-19T04:56:18,579 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-11-19T04:56:18,580 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms.
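The FSUtils lines above write the new cluster ID to a .tmp path and then move it to its final name, the usual publish-by-rename idiom that keeps readers from ever seeing a half-written file. Below is a short sketch of the same idiom with the public Hadoop FileSystem API; the NameNode address, paths and content are placeholders, not the test's actual locations.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class PublishByRenameSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", "hdfs://localhost:42051"); // placeholder NameNode address
            FileSystem fs = FileSystem.get(conf);

            Path tmp = new Path("/demo/.tmp/cluster.id");
            Path dst = new Path("/demo/cluster.id");

            // Write the complete content to a temporary location first...
            try (FSDataOutputStream out = fs.create(tmp, true)) {
                out.write("d71c29aa-example".getBytes(StandardCharsets.UTF_8));
            }
            // ...then publish it with a rename, which the NameNode performs as a single
            // metadata operation, so a reader never observes a partially written file.
            fs.mkdirs(dst.getParent());
            if (!fs.rename(tmp, dst)) {
                throw new IOException("rename failed: " + tmp + " -> " + dst);
            }
            fs.close();
        }
    }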
2024-11-19T04:56:18,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45383-0x1012e9539a60001, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:56:18,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1012e9539a60000, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:56:18,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39585 is added to blk_1073741827_1003 (size=196) 2024-11-19T04:56:18,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741827_1003 (size=196) 2024-11-19T04:56:18,592 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T04:56:18,593 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T04:56:18,594 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T04:56:18,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741828_1004 (size=1189) 2024-11-19T04:56:18,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39585 is added to blk_1073741828_1004 (size=1189) 2024-11-19T04:56:18,601 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/MasterData/data/master/store 2024-11-19T04:56:18,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741829_1005 (size=34) 2024-11-19T04:56:18,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39585 is added to blk_1073741829_1005 (size=34) 2024-11-19T04:56:18,609 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:56:18,609 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T04:56:18,609 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:56:18,609 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:56:18,609 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T04:56:18,609 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:56:18,609 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
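The master:store schema dumped above (an in-memory info family with ROWCOL blooms and ROW_INDEX_V1 encoding next to plainer proc, rs and state families) is assembled internally by MasterRegion, but an equivalent descriptor can be declared through the public client API. The sketch below reproduces just the info and proc families for brevity; it illustrates the builder calls, not the code path the log is exercising, and the table name is invented.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreLikeDescriptor {
        public static void main(String[] args) {
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("demo", "store"))   // hypothetical table name
                .setColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(3)
                    .setBloomFilterType(BloomType.ROWCOL)
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                    .setInMemory(true)
                    .setBlocksize(8 * 1024)                       // 8 KB blocks, as in the log
                    .build())
                .setColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("proc"))
                    .setMaxVersions(1)
                    .setBloomFilterType(BloomType.ROW)
                    .setBlocksize(64 * 1024)                      // default-sized 64 KB blocks
                    .build())
                .build();
            System.out.println(td);
        }
    }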
2024-11-19T04:56:18,609 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731992178609Disabling compacts and flushes for region at 1731992178609Disabling writes for close at 1731992178609Writing region close event to WAL at 1731992178609Closed at 1731992178609 2024-11-19T04:56:18,610 WARN [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/MasterData/data/master/store/.initializing 2024-11-19T04:56:18,610 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/MasterData/WALs/08a7f35e60d4,40549,1731992178470 2024-11-19T04:56:18,612 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=08a7f35e60d4%2C40549%2C1731992178470, suffix=, logDir=hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/MasterData/WALs/08a7f35e60d4,40549,1731992178470, archiveDir=hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/MasterData/oldWALs, maxLogs=10 2024-11-19T04:56:18,613 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C40549%2C1731992178470.1731992178613 2024-11-19T04:56:18,618 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/MasterData/WALs/08a7f35e60d4,40549,1731992178470/08a7f35e60d4%2C40549%2C1731992178470.1731992178613 2024-11-19T04:56:18,619 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34939:34939),(127.0.0.1/127.0.0.1:44689:44689)] 2024-11-19T04:56:18,620 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T04:56:18,620 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:56:18,620 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:56:18,620 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:56:18,623 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:56:18,625 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T04:56:18,625 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:56:18,625 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:56:18,625 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:56:18,626 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T04:56:18,626 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:56:18,627 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T04:56:18,627 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:56:18,628 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T04:56:18,628 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:56:18,628 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T04:56:18,629 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:56:18,630 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T04:56:18,630 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:56:18,630 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T04:56:18,630 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:56:18,631 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:56:18,632 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:56:18,633 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:56:18,633 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:56:18,634 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T04:56:18,636 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:56:18,641 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T04:56:18,641 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=708652, jitterRate=-0.0989035964012146}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T04:56:18,642 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731992178620Initializing all the Stores at 1731992178621 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992178621Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992178623 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992178623Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992178623Cleaning up temporary data from old regions at 1731992178633 (+10 ms)Region opened successfully at 1731992178642 (+9 ms) 2024-11-19T04:56:18,642 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T04:56:18,646 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79980945, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=08a7f35e60d4/172.17.0.2:0 2024-11-19T04:56:18,647 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T04:56:18,647 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T04:56:18,647 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T04:56:18,648 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T04:56:18,648 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-19T04:56:18,649 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-19T04:56:18,649 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T04:56:18,655 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T04:56:18,656 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40549-0x1012e9539a60000, quorum=127.0.0.1:53417, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T04:56:18,657 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T04:56:18,657 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T04:56:18,658 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40549-0x1012e9539a60000, quorum=127.0.0.1:53417, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T04:56:18,659 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T04:56:18,660 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T04:56:18,661 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40549-0x1012e9539a60000, quorum=127.0.0.1:53417, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T04:56:18,664 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T04:56:18,665 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40549-0x1012e9539a60000, quorum=127.0.0.1:53417, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T04:56:18,666 DEBUG 
[master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T04:56:18,668 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40549-0x1012e9539a60000, quorum=127.0.0.1:53417, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T04:56:18,669 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T04:56:18,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1012e9539a60000, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T04:56:18,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45383-0x1012e9539a60001, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T04:56:18,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1012e9539a60000, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:56:18,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45383-0x1012e9539a60001, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:56:18,672 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=08a7f35e60d4,40549,1731992178470, sessionid=0x1012e9539a60000, setting cluster-up flag (Was=false) 2024-11-19T04:56:18,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1012e9539a60000, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:56:18,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45383-0x1012e9539a60001, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:56:18,682 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T04:56:18,683 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=08a7f35e60d4,40549,1731992178470 2024-11-19T04:56:18,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1012e9539a60000, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:56:18,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45383-0x1012e9539a60001, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:56:18,692 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T04:56:18,693 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=08a7f35e60d4,40549,1731992178470 2024-11-19T04:56:18,694 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T04:56:18,696 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T04:56:18,696 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T04:56:18,696 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-19T04:56:18,696 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 08a7f35e60d4,40549,1731992178470 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T04:56:18,697 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/08a7f35e60d4:0, corePoolSize=5, maxPoolSize=5 2024-11-19T04:56:18,698 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/08a7f35e60d4:0, corePoolSize=5, maxPoolSize=5 2024-11-19T04:56:18,698 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=5, maxPoolSize=5 2024-11-19T04:56:18,698 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=5, maxPoolSize=5 2024-11-19T04:56:18,698 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/08a7f35e60d4:0, corePoolSize=10, maxPoolSize=10 2024-11-19T04:56:18,698 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:56:18,698 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=2, maxPoolSize=2 2024-11-19T04:56:18,698 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=1, 
maxPoolSize=1 2024-11-19T04:56:18,699 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T04:56:18,699 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T04:56:18,701 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:56:18,701 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T04:56:18,702 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731992208702 2024-11-19T04:56:18,702 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T04:56:18,702 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T04:56:18,702 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T04:56:18,702 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T04:56:18,702 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T04:56:18,702 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T04:56:18,702 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T04:56:18,703 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T04:56:18,703 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T04:56:18,703 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T04:56:18,703 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T04:56:18,703 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T04:56:18,703 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.large.0-1731992178703,5,FailOnTimeoutGroup] 2024-11-19T04:56:18,704 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.small.0-1731992178704,5,FailOnTimeoutGroup] 2024-11-19T04:56:18,704 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T04:56:18,704 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T04:56:18,704 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T04:56:18,704 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
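The hbase:meta table descriptor printed by FSTableDescriptors above can be approximated with HBase's public client API. The following is an illustrative sketch only: the table name "demo" and the single "info" family are stand-ins (the real descriptor carries info, ns, rep_barrier and table families), while the family attributes and the coprocessor class mirror what the log shows.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
  public static void main(String[] args) throws IOException {
    // Family attributes mirror what the log prints for the 'info' family of hbase:meta.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))        // stand-in table name, not from the log
        .setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                        // VERSIONS => '3'
            .setInMemory(true)                        // IN_MEMORY => 'true'
            .setBloomFilterType(BloomType.ROWCOL)     // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8192)                       // BLOCKSIZE => '8192 B (8KB)'
            .build())
        // Same endpoint class the descriptor above registers on hbase:meta.
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .build();
    System.out.println(td);
  }
}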
2024-11-19T04:56:18,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39585 is added to blk_1073741831_1007 (size=1321) 2024-11-19T04:56:18,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741831_1007 (size=1321) 2024-11-19T04:56:18,708 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T04:56:18,709 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e 2024-11-19T04:56:18,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39585 is added to blk_1073741832_1008 (size=32) 2024-11-19T04:56:18,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741832_1008 (size=32) 2024-11-19T04:56:18,717 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:56:18,718 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T04:56:18,720 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T04:56:18,720 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:56:18,720 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:56:18,720 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T04:56:18,722 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T04:56:18,722 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:56:18,722 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:56:18,722 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T04:56:18,723 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T04:56:18,723 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:56:18,724 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:56:18,724 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T04:56:18,725 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T04:56:18,725 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:56:18,725 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:56:18,726 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T04:56:18,726 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/hbase/meta/1588230740 2024-11-19T04:56:18,727 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/hbase/meta/1588230740 2024-11-19T04:56:18,728 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T04:56:18,728 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T04:56:18,728 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
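The CompactionConfiguration entries above restate the stock compaction settings. As a rough sketch (assuming the standard configuration keys; the numbers simply repeat the logged values, nothing here is taken from the test's own setup), the same values could be set explicitly like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize: 128 MB
    conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio 1.200000
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio 5.000000
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);            // major period (7 days)
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);          // major jitter 0.500000
    System.out.println(conf.get("hbase.hstore.compaction.ratio"));
  }
}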
2024-11-19T04:56:18,729 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T04:56:18,731 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T04:56:18,732 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=700388, jitterRate=-0.10941079258918762}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T04:56:18,732 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731992178717Initializing all the Stores at 1731992178718 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992178718Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992178718Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992178718Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992178718Cleaning up temporary data from old regions at 1731992178728 (+10 ms)Region opened successfully at 1731992178732 (+4 ms) 2024-11-19T04:56:18,732 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T04:56:18,732 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T04:56:18,732 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T04:56:18,732 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T04:56:18,732 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T04:56:18,733 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T04:56:18,733 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731992178732Disabling compacts and flushes for region at 1731992178732Disabling writes for close at 1731992178732Writing region 
close event to WAL at 1731992178733 (+1 ms)Closed at 1731992178733 2024-11-19T04:56:18,734 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T04:56:18,734 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T04:56:18,734 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T04:56:18,735 INFO [RS:0;08a7f35e60d4:45383 {}] regionserver.HRegionServer(746): ClusterId : d71c29aa-8a4b-42a0-b4fc-b8025333c5aa 2024-11-19T04:56:18,735 DEBUG [RS:0;08a7f35e60d4:45383 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T04:56:18,736 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T04:56:18,737 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T04:56:18,738 DEBUG [RS:0;08a7f35e60d4:45383 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T04:56:18,738 DEBUG [RS:0;08a7f35e60d4:45383 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T04:56:18,740 DEBUG [RS:0;08a7f35e60d4:45383 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T04:56:18,740 DEBUG [RS:0;08a7f35e60d4:45383 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d515274, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=08a7f35e60d4/172.17.0.2:0 2024-11-19T04:56:18,753 DEBUG [RS:0;08a7f35e60d4:45383 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;08a7f35e60d4:45383 2024-11-19T04:56:18,753 INFO [RS:0;08a7f35e60d4:45383 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T04:56:18,753 INFO [RS:0;08a7f35e60d4:45383 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T04:56:18,753 DEBUG [RS:0;08a7f35e60d4:45383 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-19T04:56:18,754 INFO [RS:0;08a7f35e60d4:45383 {}] regionserver.HRegionServer(2659): reportForDuty to master=08a7f35e60d4,40549,1731992178470 with port=45383, startcode=1731992178519 2024-11-19T04:56:18,754 DEBUG [RS:0;08a7f35e60d4:45383 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T04:56:18,756 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52867, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T04:56:18,757 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40549 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 08a7f35e60d4,45383,1731992178519 2024-11-19T04:56:18,757 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40549 {}] master.ServerManager(517): Registering regionserver=08a7f35e60d4,45383,1731992178519 2024-11-19T04:56:18,758 DEBUG [RS:0;08a7f35e60d4:45383 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e 2024-11-19T04:56:18,759 DEBUG [RS:0;08a7f35e60d4:45383 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42051 2024-11-19T04:56:18,759 DEBUG [RS:0;08a7f35e60d4:45383 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T04:56:18,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1012e9539a60000, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T04:56:18,762 DEBUG [RS:0;08a7f35e60d4:45383 {}] zookeeper.ZKUtil(111): regionserver:45383-0x1012e9539a60001, quorum=127.0.0.1:53417, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/08a7f35e60d4,45383,1731992178519 2024-11-19T04:56:18,762 WARN [RS:0;08a7f35e60d4:45383 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T04:56:18,763 INFO [RS:0;08a7f35e60d4:45383 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T04:56:18,763 DEBUG [RS:0;08a7f35e60d4:45383 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/WALs/08a7f35e60d4,45383,1731992178519 2024-11-19T04:56:18,763 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [08a7f35e60d4,45383,1731992178519] 2024-11-19T04:56:18,766 INFO [RS:0;08a7f35e60d4:45383 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T04:56:18,767 INFO [RS:0;08a7f35e60d4:45383 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T04:56:18,768 INFO [RS:0;08a7f35e60d4:45383 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T04:56:18,768 INFO [RS:0;08a7f35e60d4:45383 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
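The MemStoreFlusher entry above is internally consistent: the low-water mark is the global memstore limit scaled by the lower-limit fraction, i.e. 880 MB x 0.95 = 836 MB. A minimal sketch of that relationship, assuming the stock property names and their defaults (the heap size below is a stand-in chosen so that 40% of it is ~880 MB):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Defaults: 40% of heap for all memstores, low-water mark at 95% of that limit.
    float globalFraction = conf.getFloat("hbase.regionserver.global.memstore.size", 0.4f);
    float lowerFraction  = conf.getFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    long heapBytes = 2200L * 1024 * 1024;                    // stand-in heap (~2.2 GB), not from the log
    long globalLimit = (long) (heapBytes * globalFraction);  // ~880 MB, as logged
    long lowMark = (long) (globalLimit * lowerFraction);     // ~836 MB, as logged
    System.out.println(globalLimit + " / " + lowMark);
  }
}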
2024-11-19T04:56:18,768 INFO [RS:0;08a7f35e60d4:45383 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T04:56:18,768 INFO [RS:0;08a7f35e60d4:45383 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T04:56:18,769 INFO [RS:0;08a7f35e60d4:45383 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T04:56:18,769 DEBUG [RS:0;08a7f35e60d4:45383 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:56:18,769 DEBUG [RS:0;08a7f35e60d4:45383 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:56:18,769 DEBUG [RS:0;08a7f35e60d4:45383 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:56:18,769 DEBUG [RS:0;08a7f35e60d4:45383 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:56:18,769 DEBUG [RS:0;08a7f35e60d4:45383 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:56:18,769 DEBUG [RS:0;08a7f35e60d4:45383 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/08a7f35e60d4:0, corePoolSize=2, maxPoolSize=2 2024-11-19T04:56:18,769 DEBUG [RS:0;08a7f35e60d4:45383 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:56:18,769 DEBUG [RS:0;08a7f35e60d4:45383 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:56:18,769 DEBUG [RS:0;08a7f35e60d4:45383 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:56:18,769 DEBUG [RS:0;08a7f35e60d4:45383 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:56:18,769 DEBUG [RS:0;08a7f35e60d4:45383 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:56:18,769 DEBUG [RS:0;08a7f35e60d4:45383 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:56:18,769 DEBUG [RS:0;08a7f35e60d4:45383 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/08a7f35e60d4:0, corePoolSize=3, maxPoolSize=3 2024-11-19T04:56:18,769 DEBUG [RS:0;08a7f35e60d4:45383 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0, corePoolSize=3, maxPoolSize=3 2024-11-19T04:56:18,770 INFO [RS:0;08a7f35e60d4:45383 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-19T04:56:18,770 INFO [RS:0;08a7f35e60d4:45383 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T04:56:18,770 INFO [RS:0;08a7f35e60d4:45383 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T04:56:18,770 INFO [RS:0;08a7f35e60d4:45383 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T04:56:18,770 INFO [RS:0;08a7f35e60d4:45383 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T04:56:18,770 INFO [RS:0;08a7f35e60d4:45383 {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,45383,1731992178519-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T04:56:18,785 INFO [RS:0;08a7f35e60d4:45383 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T04:56:18,785 INFO [RS:0;08a7f35e60d4:45383 {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,45383,1731992178519-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T04:56:18,785 INFO [RS:0;08a7f35e60d4:45383 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:56:18,785 INFO [RS:0;08a7f35e60d4:45383 {}] regionserver.Replication(171): 08a7f35e60d4,45383,1731992178519 started 2024-11-19T04:56:18,799 INFO [RS:0;08a7f35e60d4:45383 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:56:18,799 INFO [RS:0;08a7f35e60d4:45383 {}] regionserver.HRegionServer(1482): Serving as 08a7f35e60d4,45383,1731992178519, RpcServer on 08a7f35e60d4/172.17.0.2:45383, sessionid=0x1012e9539a60001 2024-11-19T04:56:18,799 DEBUG [RS:0;08a7f35e60d4:45383 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T04:56:18,799 DEBUG [RS:0;08a7f35e60d4:45383 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 08a7f35e60d4,45383,1731992178519 2024-11-19T04:56:18,799 DEBUG [RS:0;08a7f35e60d4:45383 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '08a7f35e60d4,45383,1731992178519' 2024-11-19T04:56:18,799 DEBUG [RS:0;08a7f35e60d4:45383 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T04:56:18,800 DEBUG [RS:0;08a7f35e60d4:45383 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T04:56:18,800 DEBUG [RS:0;08a7f35e60d4:45383 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T04:56:18,800 DEBUG [RS:0;08a7f35e60d4:45383 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T04:56:18,800 DEBUG [RS:0;08a7f35e60d4:45383 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 08a7f35e60d4,45383,1731992178519 2024-11-19T04:56:18,800 DEBUG [RS:0;08a7f35e60d4:45383 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '08a7f35e60d4,45383,1731992178519' 2024-11-19T04:56:18,800 DEBUG [RS:0;08a7f35e60d4:45383 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T04:56:18,801 DEBUG 
[RS:0;08a7f35e60d4:45383 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T04:56:18,801 DEBUG [RS:0;08a7f35e60d4:45383 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T04:56:18,801 INFO [RS:0;08a7f35e60d4:45383 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T04:56:18,801 INFO [RS:0;08a7f35e60d4:45383 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T04:56:18,888 WARN [08a7f35e60d4:40549 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-19T04:56:18,903 INFO [RS:0;08a7f35e60d4:45383 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=08a7f35e60d4%2C45383%2C1731992178519, suffix=, logDir=hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/WALs/08a7f35e60d4,45383,1731992178519, archiveDir=hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/oldWALs, maxLogs=32 2024-11-19T04:56:18,904 INFO [RS:0;08a7f35e60d4:45383 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C45383%2C1731992178519.1731992178904 2024-11-19T04:56:18,910 INFO [RS:0;08a7f35e60d4:45383 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/WALs/08a7f35e60d4,45383,1731992178519/08a7f35e60d4%2C45383%2C1731992178519.1731992178904 2024-11-19T04:56:18,911 DEBUG [RS:0;08a7f35e60d4:45383 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34939:34939),(127.0.0.1/127.0.0.1:44689:44689)] 2024-11-19T04:56:19,118 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:19,138 DEBUG [08a7f35e60d4:40549 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T04:56:19,138 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=08a7f35e60d4,45383,1731992178519 2024-11-19T04:56:19,140 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 08a7f35e60d4,45383,1731992178519, state=OPENING 2024-11-19T04:56:19,141 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T04:56:19,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1012e9539a60000, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:56:19,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45383-0x1012e9539a60001, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:56:19,143 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T04:56:19,143 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T04:56:19,143 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T04:56:19,143 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=08a7f35e60d4,45383,1731992178519}] 2024-11-19T04:56:19,297 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T04:56:19,299 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53239, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T04:56:19,304 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T04:56:19,304 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T04:56:19,306 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=08a7f35e60d4%2C45383%2C1731992178519.meta, suffix=.meta, 
logDir=hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/WALs/08a7f35e60d4,45383,1731992178519, archiveDir=hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/oldWALs, maxLogs=32 2024-11-19T04:56:19,306 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C45383%2C1731992178519.meta.1731992179306.meta 2024-11-19T04:56:19,311 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/WALs/08a7f35e60d4,45383,1731992178519/08a7f35e60d4%2C45383%2C1731992178519.meta.1731992179306.meta 2024-11-19T04:56:19,317 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44689:44689),(127.0.0.1/127.0.0.1:34939:34939)] 2024-11-19T04:56:19,317 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T04:56:19,318 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T04:56:19,318 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T04:56:19,318 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-19T04:56:19,318 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T04:56:19,318 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:56:19,318 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T04:56:19,318 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T04:56:19,320 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T04:56:19,321 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T04:56:19,321 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:56:19,321 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:56:19,321 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T04:56:19,322 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T04:56:19,322 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:56:19,323 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:56:19,323 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T04:56:19,323 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T04:56:19,323 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:56:19,324 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:56:19,324 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T04:56:19,324 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T04:56:19,324 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:56:19,325 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-19T04:56:19,325 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T04:56:19,326 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/hbase/meta/1588230740 2024-11-19T04:56:19,327 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/hbase/meta/1588230740 2024-11-19T04:56:19,328 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T04:56:19,328 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T04:56:19,329 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T04:56:19,330 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T04:56:19,331 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=801785, jitterRate=0.01952245831489563}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T04:56:19,331 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T04:56:19,331 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731992179318Writing region info on filesystem at 1731992179319 (+1 ms)Initializing all the Stores at 1731992179319Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992179319Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992179320 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992179320Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992179320Cleaning up temporary data from old regions at 1731992179328 (+8 ms)Running coprocessor post-open hooks at 1731992179331 (+3 ms)Region opened successfully at 1731992179331 2024-11-19T04:56:19,332 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731992179297 2024-11-19T04:56:19,335 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T04:56:19,335 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T04:56:19,336 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=08a7f35e60d4,45383,1731992178519 2024-11-19T04:56:19,337 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 08a7f35e60d4,45383,1731992178519, state=OPEN 2024-11-19T04:56:19,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:19,343 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=08a7f35e60d4,45383,1731992178519 2024-11-19T04:56:19,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1012e9539a60000, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T04:56:19,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45383-0x1012e9539a60001, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T04:56:19,343 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T04:56:19,343 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T04:56:19,346 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T04:56:19,346 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=08a7f35e60d4,45383,1731992178519 in 200 msec 2024-11-19T04:56:19,349 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T04:56:19,349 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 612 msec 2024-11-19T04:56:19,350 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T04:56:19,350 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T04:56:19,351 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T04:56:19,351 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=08a7f35e60d4,45383,1731992178519, seqNum=-1] 2024-11-19T04:56:19,352 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T04:56:19,353 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41021, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T04:56:19,359 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 663 msec 2024-11-19T04:56:19,360 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to 
report in: status=status unset, state=RUNNING, startTime=1731992179359, completionTime=-1 2024-11-19T04:56:19,360 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T04:56:19,360 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-19T04:56:19,362 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-19T04:56:19,362 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731992239362 2024-11-19T04:56:19,362 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731992299362 2024-11-19T04:56:19,362 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-19T04:56:19,362 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,40549,1731992178470-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T04:56:19,362 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,40549,1731992178470-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:56:19,362 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,40549,1731992178470-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:56:19,362 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-08a7f35e60d4:40549, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:56:19,362 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T04:56:19,362 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T04:56:19,364 DEBUG [master/08a7f35e60d4:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T04:56:19,366 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.810sec 2024-11-19T04:56:19,366 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T04:56:19,366 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T04:56:19,366 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T04:56:19,366 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-19T04:56:19,366 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T04:56:19,366 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,40549,1731992178470-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T04:56:19,366 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,40549,1731992178470-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T04:56:19,368 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T04:56:19,368 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T04:56:19,368 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,40549,1731992178470-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:56:19,436 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10c2896a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T04:56:19,436 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 08a7f35e60d4,40549,-1 for getting cluster id 2024-11-19T04:56:19,436 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T04:56:19,438 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd71c29aa-8a4b-42a0-b4fc-b8025333c5aa' 2024-11-19T04:56:19,439 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T04:56:19,439 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d71c29aa-8a4b-42a0-b4fc-b8025333c5aa" 2024-11-19T04:56:19,439 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f24cede, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T04:56:19,439 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [08a7f35e60d4,40549,-1] 2024-11-19T04:56:19,439 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T04:56:19,440 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:56:19,441 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51150, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T04:56:19,442 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b7fc8f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T04:56:19,442 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T04:56:19,443 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=08a7f35e60d4,45383,1731992178519, seqNum=-1] 2024-11-19T04:56:19,444 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T04:56:19,445 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49200, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T04:56:19,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=08a7f35e60d4,40549,1731992178470 2024-11-19T04:56:19,447 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:56:19,450 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T04:56:19,450 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-19T04:56:19,451 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 08a7f35e60d4,40549,1731992178470 2024-11-19T04:56:19,451 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@fca45ce 2024-11-19T04:56:19,451 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-19T04:56:19,452 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51156, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-19T04:56:19,453 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-19T04:56:19,453 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
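The two TableDescriptorChecker warnings refer to hbase.hregion.max.filesize (786432) and hbase.hregion.memstore.flush.size (8192), which this test shrinks deliberately to force frequent flushes and WAL rolls. A hedged sketch of how such values are typically lowered on a test configuration follows; only the two property names and their values come from the log, the surrounding setup code is assumed.

// Sketch: shrinking region size and memstore flush size for a log-rolling test.
// Only the two property names/values are taken from the warnings above; the rest is illustrative.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallRegionTestConfig {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // 768 KB max region size instead of the multi-GB production default,
    // so splits and compactions happen quickly during the test.
    conf.setLong("hbase.hregion.max.filesize", 786432L);
    // 8 KB memstore flush threshold instead of the 128 MB default,
    // so flushes (and therefore WAL rolls) are very frequent.
    conf.setLong("hbase.hregion.memstore.flush.size", 8192L);
    return conf;
  }
}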
2024-11-19T04:56:19,453 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T04:56:19,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T04:56:19,456 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T04:56:19,456 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:56:19,456 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-19T04:56:19,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T04:56:19,457 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T04:56:19,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741835_1011 (size=405) 2024-11-19T04:56:19,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39585 is added to blk_1073741835_1011 (size=405) 2024-11-19T04:56:19,467 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => f3079176b64f8122eff94870174a66ff, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e 2024-11-19T04:56:19,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741836_1012 (size=88) 2024-11-19T04:56:19,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39585 is added to blk_1073741836_1012 (size=88) 2024-11-19T04:56:19,474 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:56:19,474 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing f3079176b64f8122eff94870174a66ff, disabling compactions & flushes 2024-11-19T04:56:19,474 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff. 2024-11-19T04:56:19,475 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff. 2024-11-19T04:56:19,475 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff. after waiting 0 ms 2024-11-19T04:56:19,475 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff. 2024-11-19T04:56:19,475 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff. 2024-11-19T04:56:19,475 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for f3079176b64f8122eff94870174a66ff: Waiting for close lock at 1731992179474Disabling compacts and flushes for region at 1731992179474Disabling writes for close at 1731992179475 (+1 ms)Writing region close event to WAL at 1731992179475Closed at 1731992179475 2024-11-19T04:56:19,476 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T04:56:19,476 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731992179476"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731992179476"}]},"ts":"1731992179476"} 2024-11-19T04:56:19,479 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
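The records above show CreateTableProcedure building the 'TestLogRolling-testCompactionRecordDoesntBlockRolling' table with a single 'info' family (VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => 65536). A client-side request producing an equivalent descriptor might look like the sketch below; the connection handling is assumed, and the descriptor only mirrors the non-default attributes visible in the log.

// Sketch: issuing an equivalent create-table request through the Admin API.
// Attribute values mirror the descriptor printed in the log; everything else is assumed.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTable {
  public static void main(String[] args) throws Exception {
    TableName name = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(name)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(1)                 // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
            .setBlocksize(65536)               // BLOCKSIZE => 65536
            .build())
        .build();
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.createTable(desc); // corresponds to the pid=4 CreateTableProcedure in the log
    }
  }
}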
2024-11-19T04:56:19,480 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T04:56:19,480 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731992179480"}]},"ts":"1731992179480"} 2024-11-19T04:56:19,482 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-19T04:56:19,482 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=f3079176b64f8122eff94870174a66ff, ASSIGN}] 2024-11-19T04:56:19,483 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=f3079176b64f8122eff94870174a66ff, ASSIGN 2024-11-19T04:56:19,485 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=f3079176b64f8122eff94870174a66ff, ASSIGN; state=OFFLINE, location=08a7f35e60d4,45383,1731992178519; forceNewPlan=false, retain=false 2024-11-19T04:56:19,635 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=f3079176b64f8122eff94870174a66ff, regionState=OPENING, regionLocation=08a7f35e60d4,45383,1731992178519 2024-11-19T04:56:19,638 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=f3079176b64f8122eff94870174a66ff, ASSIGN because future has completed 2024-11-19T04:56:19,639 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure f3079176b64f8122eff94870174a66ff, server=08a7f35e60d4,45383,1731992178519}] 2024-11-19T04:56:19,796 INFO [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff. 
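Once the assignment recorded above (pid=5 and its OpenRegionProcedure pid=6) completes in the records that follow, the region's placement can be read back from the client side. The snippet below is only an illustrative sketch: the table name comes from the log, while the RegionLocator-based lookup is an assumed way to read the same hbase:meta information the procedure has just written.

// Sketch: reading the region's assigned location after the ASSIGN procedure above finishes.
// Assumes an open Connection 'conn'; the table name is taken from the log.
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public final class RegionPlacement {
  public static void print(Connection conn) throws java.io.IOException {
    TableName name = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    try (RegionLocator locator = conn.getRegionLocator(name)) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // For this single-region table the log shows encoded name f3079176b64f8122eff94870174a66ff
        // ending up on 08a7f35e60d4,45383,1731992178519.
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}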
2024-11-19T04:56:19,796 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => f3079176b64f8122eff94870174a66ff, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff.', STARTKEY => '', ENDKEY => ''} 2024-11-19T04:56:19,796 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling f3079176b64f8122eff94870174a66ff 2024-11-19T04:56:19,796 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:56:19,797 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for f3079176b64f8122eff94870174a66ff 2024-11-19T04:56:19,797 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for f3079176b64f8122eff94870174a66ff 2024-11-19T04:56:19,798 INFO [StoreOpener-f3079176b64f8122eff94870174a66ff-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region f3079176b64f8122eff94870174a66ff 2024-11-19T04:56:19,799 INFO [StoreOpener-f3079176b64f8122eff94870174a66ff-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f3079176b64f8122eff94870174a66ff columnFamilyName info 2024-11-19T04:56:19,799 DEBUG [StoreOpener-f3079176b64f8122eff94870174a66ff-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:56:19,800 INFO [StoreOpener-f3079176b64f8122eff94870174a66ff-1 {}] regionserver.HStore(327): Store=f3079176b64f8122eff94870174a66ff/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T04:56:19,800 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for f3079176b64f8122eff94870174a66ff 2024-11-19T04:56:19,801 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff 2024-11-19T04:56:19,801 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff 2024-11-19T04:56:19,801 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for f3079176b64f8122eff94870174a66ff 2024-11-19T04:56:19,801 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for f3079176b64f8122eff94870174a66ff 2024-11-19T04:56:19,803 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for f3079176b64f8122eff94870174a66ff 2024-11-19T04:56:19,806 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T04:56:19,806 INFO [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened f3079176b64f8122eff94870174a66ff; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=827411, jitterRate=0.05210763216018677}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T04:56:19,806 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f3079176b64f8122eff94870174a66ff 2024-11-19T04:56:19,807 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for f3079176b64f8122eff94870174a66ff: Running coprocessor pre-open hook at 1731992179797Writing region info on filesystem at 1731992179797Initializing all the Stores at 1731992179797Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992179797Cleaning up temporary data from old regions at 1731992179802 (+5 ms)Running coprocessor post-open hooks at 1731992179806 (+4 ms)Region opened successfully at 1731992179807 (+1 ms) 2024-11-19T04:56:19,808 INFO [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff., pid=6, masterSystemTime=1731992179792 2024-11-19T04:56:19,811 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task 
for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff. 2024-11-19T04:56:19,811 INFO [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff. 2024-11-19T04:56:19,812 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=f3079176b64f8122eff94870174a66ff, regionState=OPEN, openSeqNum=2, regionLocation=08a7f35e60d4,45383,1731992178519 2024-11-19T04:56:19,815 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure f3079176b64f8122eff94870174a66ff, server=08a7f35e60d4,45383,1731992178519 because future has completed 2024-11-19T04:56:19,818 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-19T04:56:19,819 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure f3079176b64f8122eff94870174a66ff, server=08a7f35e60d4,45383,1731992178519 in 177 msec 2024-11-19T04:56:19,822 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-19T04:56:19,822 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=f3079176b64f8122eff94870174a66ff, ASSIGN in 337 msec 2024-11-19T04:56:19,823 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T04:56:19,823 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731992179823"}]},"ts":"1731992179823"} 2024-11-19T04:56:19,826 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-19T04:56:19,827 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T04:56:19,829 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 374 msec 2024-11-19T04:56:20,118 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:20,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:21,119 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:21,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:22,119 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:22,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:22,649 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T04:56:22,650 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:22,650 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:22,650 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:22,650 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:22,668 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:22,669 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:22,669 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:22,669 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:22,670 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:22,670 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:22,674 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:22,674 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:22,674 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:22,677 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:56:23,120 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:23,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:56:24,121 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:24,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:24,766 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-19T04:56:24,767 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-19T04:56:25,121 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:25,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:26,122 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:26,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:27,123 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:56:27,146 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T04:56:27,146 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-19T04:56:27,147 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T04:56:27,147 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-19T04:56:27,147 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-19T04:56:27,147 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-19T04:56:27,148 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T04:56:27,148 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-19T04:56:27,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:28,123 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:28,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:29,124 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:29,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:56:29,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T04:56:29,539 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-19T04:56:29,539 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-19T04:56:29,542 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T04:56:29,542 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff. 2024-11-19T04:56:29,545 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff., hostname=08a7f35e60d4,45383,1731992178519, seqNum=2] 2024-11-19T04:56:29,553 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T04:56:29,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T04:56:29,561 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-19T04:56:29,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-19T04:56:29,562 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T04:56:29,563 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T04:56:29,724 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45383 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-19T04:56:29,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff. 
2024-11-19T04:56:29,725 INFO [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing f3079176b64f8122eff94870174a66ff 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-19T04:56:29,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/.tmp/info/7994b803dd0f42b5bd80c77a75f15fec is 1080, key is row0001/info:/1731992189547/Put/seqid=0 2024-11-19T04:56:29,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39585 is added to blk_1073741837_1013 (size=6033) 2024-11-19T04:56:29,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741837_1013 (size=6033) 2024-11-19T04:56:29,749 INFO [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/.tmp/info/7994b803dd0f42b5bd80c77a75f15fec 2024-11-19T04:56:29,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/.tmp/info/7994b803dd0f42b5bd80c77a75f15fec as hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/info/7994b803dd0f42b5bd80c77a75f15fec 2024-11-19T04:56:29,763 INFO [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/info/7994b803dd0f42b5bd80c77a75f15fec, entries=1, sequenceid=5, filesize=5.9 K 2024-11-19T04:56:29,764 INFO [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for f3079176b64f8122eff94870174a66ff in 39ms, sequenceid=5, compaction requested=false 2024-11-19T04:56:29,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for f3079176b64f8122eff94870174a66ff: 2024-11-19T04:56:29,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff. 
2024-11-19T04:56:29,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-19T04:56:29,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-19T04:56:29,773 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-19T04:56:29,773 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 207 msec 2024-11-19T04:56:29,775 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 218 msec 2024-11-19T04:56:30,124 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:56:30,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:31,125 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:31,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:56:32,126 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:32,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:33,126 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:56:33,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:34,127 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:34,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:56:35,128 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:35,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:36,128 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:56:36,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:37,129 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:37,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:56:38,129 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:38,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:39,130 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:56:39,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-19T04:56:39,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-19T04:56:39,608 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-19T04:56:39,611 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-19T04:56:39,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-19T04:56:39,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-19T04:56:39,614 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-19T04:56:39,615 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-19T04:56:39,615 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-19T04:56:39,768 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45383 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-11-19T04:56:39,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff.
2024-11-19T04:56:39,769 INFO [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing f3079176b64f8122eff94870174a66ff 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-19T04:56:39,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/.tmp/info/0d8f3a4291bc438396db076c067c1110 is 1080, key is row0002/info:/1731992199610/Put/seqid=0
2024-11-19T04:56:39,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741838_1014 (size=6033)
2024-11-19T04:56:39,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39585 is added to blk_1073741838_1014 (size=6033)
2024-11-19T04:56:39,781 INFO [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/.tmp/info/0d8f3a4291bc438396db076c067c1110
2024-11-19T04:56:39,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/.tmp/info/0d8f3a4291bc438396db076c067c1110 as hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/info/0d8f3a4291bc438396db076c067c1110
2024-11-19T04:56:39,802 INFO [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/info/0d8f3a4291bc438396db076c067c1110, entries=1, sequenceid=9, filesize=5.9 K
2024-11-19T04:56:39,803 INFO [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for f3079176b64f8122eff94870174a66ff in 34ms, sequenceid=9, compaction requested=false
2024-11-19T04:56:39,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for f3079176b64f8122eff94870174a66ff:
2024-11-19T04:56:39,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff.
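Note: the entries above record one complete client-driven flush: the admin request is turned into a FlushTableProcedure (pid=9) with a FlushRegionProcedure subprocedure (pid=10), the region server flushes the memstore to a temporary HFile and commits it under info/, and the result is then reported back to the master (pid=10/pid=9 completion in the entries that follow). A minimal sketch of issuing the same kind of flush from the synchronous Java client API is given here; only the table name is taken from the log, and the class name and default connection settings are illustrative assumptions, not part of the test.

// Sketch only: triggers a table flush like the one logged above, assuming an
// hbase-site.xml on the classpath that points at the target cluster.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Blocks until the master reports the flush procedure as done, which is
      // what the repeated "Checking to see if procedure is done" polling reflects.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}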
2024-11-19T04:56:39,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-19T04:56:39,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-19T04:56:39,808 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-19T04:56:39,808 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 190 msec 2024-11-19T04:56:39,811 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 198 msec 2024-11-19T04:56:40,130 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:56:40,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:41,131 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:41,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:56:42,132 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:42,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:43,132 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:56:43,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:44,133 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:44,134 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 after 68047ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor192.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
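Note: every one of the warnings in this stretch fails the same way: the calls RecoverLeaseFSUtils makes through the DistributedFileSystem end in java.io.IOException: Filesystem closed, because the Close-WAL-Writer-0 thread keeps retrying lease recovery on hdfs://localhost:41423 after that filesystem client has already been shut down. A minimal standalone sketch of that failure mode follows; the NameNode URI is taken from the log, while the file path and class name are hypothetical and the sketch assumes a reachable HDFS instance.

// Sketch only: reproduces "java.io.IOException: Filesystem closed" by using a
// DistributedFileSystem after it has been closed, as in the stack traces above.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class FilesystemClosedSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem fs =
        (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:41423"), conf);
    Path wal = new Path("/tmp/example-wal"); // hypothetical path, for illustration only
    fs.close(); // after this, DFSClient.checkOpen() rejects every subsequent call
    // Either of the calls RecoverLeaseFSUtils relies on (recoverLease / isFileClosed)
    // now throws java.io.IOException: Filesystem closed, matching the traces above.
    fs.isFileClosed(wal);
  }
}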
2024-11-19T04:56:44,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:44,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta after 68043ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor192.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T04:56:45,134 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-19T04:56:45,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-19T04:56:46,135 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null
2024-11-19T04:56:46,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null
2024-11-19T04:56:47,135 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null
2024-11-19T04:56:47,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null
2024-11-19T04:56:48,136 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null
2024-11-19T04:56:48,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null
2024-11-19T04:56:48,452 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-19T04:56:49,137 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null
2024-11-19T04:56:49,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null
2024-11-19T04:56:49,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-19T04:56:49,689 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-19T04:56:49,692 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C45383%2C1731992178519.1731992209692
2024-11-19T04:56:49,704 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T04:56:49,704 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T04:56:49,704 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T04:56:49,704 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T04:56:49,704 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T04:56:49,704 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/WALs/08a7f35e60d4,45383,1731992178519/08a7f35e60d4%2C45383%2C1731992178519.1731992178904 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/WALs/08a7f35e60d4,45383,1731992178519/08a7f35e60d4%2C45383%2C1731992178519.1731992209692
2024-11-19T04:56:49,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741833_1009 (size=5546)
2024-11-19T04:56:49,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39585 is added to blk_1073741833_1009 (size=5546)
2024-11-19T04:56:49,717 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44689:44689),(127.0.0.1/127.0.0.1:34939:34939)]
2024-11-19T04:56:49,718 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-19T04:56:49,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-19T04:56:49,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-19T04:56:49,720 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-19T04:56:49,721 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-19T04:56:49,721 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-19T04:56:49,874 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45383 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-11-19T04:56:49,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff.
2024-11-19T04:56:49,875 INFO [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing f3079176b64f8122eff94870174a66ff 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-19T04:56:49,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/.tmp/info/5cd4127b05a04ceebd3f3a98669980bc is 1080, key is row0003/info:/1731992209690/Put/seqid=0
2024-11-19T04:56:49,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39585 is added to blk_1073741840_1016 (size=6033)
2024-11-19T04:56:49,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741840_1016 (size=6033)
2024-11-19T04:56:49,884 INFO [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/.tmp/info/5cd4127b05a04ceebd3f3a98669980bc
2024-11-19T04:56:49,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/.tmp/info/5cd4127b05a04ceebd3f3a98669980bc as hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/info/5cd4127b05a04ceebd3f3a98669980bc
2024-11-19T04:56:49,895 INFO [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/info/5cd4127b05a04ceebd3f3a98669980bc, entries=1, sequenceid=13, filesize=5.9 K
2024-11-19T04:56:49,896 INFO [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for f3079176b64f8122eff94870174a66ff in 21ms, sequenceid=13, compaction requested=true
2024-11-19T04:56:49,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for f3079176b64f8122eff94870174a66ff:
2024-11-19T04:56:49,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff.
2024-11-19T04:56:49,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-11-19T04:56:49,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-11-19T04:56:49,900 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-11-19T04:56:49,900 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 177 msec
2024-11-19T04:56:49,903 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 183 msec
2024-11-19T04:56:50,137 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null
2024-11-19T04:56:50,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null
2024-11-19T04:56:51,138 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null
2024-11-19T04:56:51,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null
2024-11-19T04:56:52,139 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null
2024-11-19T04:56:52,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null
2024-11-19T04:56:53,139 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null
2024-11-19T04:56:53,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null
2024-11-19T04:56:54,140 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null
2024-11-19T04:56:54,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null
2024-11-19T04:56:55,140 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null
2024-11-19T04:56:55,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null
2024-11-19T04:56:56,141 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null
2024-11-19T04:56:56,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null
2024-11-19T04:56:57,142 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null
2024-11-19T04:56:57,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null
2024-11-19T04:56:58,142 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null
2024-11-19T04:56:58,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null
2024-11-19T04:56:59,143 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:59,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:56:59,401 INFO [master/08a7f35e60d4:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-19T04:56:59,401 INFO [master/08a7f35e60d4:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-19T04:56:59,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-19T04:56:59,818 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-19T04:56:59,818 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T04:56:59,820 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T04:56:59,820 DEBUG [Time-limited test {}] regionserver.HStore(1541): f3079176b64f8122eff94870174a66ff/info is initiating minor compaction (all files) 2024-11-19T04:56:59,820 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T04:56:59,820 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T04:56:59,820 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of f3079176b64f8122eff94870174a66ff/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff. 2024-11-19T04:56:59,820 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/info/7994b803dd0f42b5bd80c77a75f15fec, hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/info/0d8f3a4291bc438396db076c067c1110, hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/info/5cd4127b05a04ceebd3f3a98669980bc] into tmpdir=hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/.tmp, totalSize=17.7 K 2024-11-19T04:56:59,821 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 7994b803dd0f42b5bd80c77a75f15fec, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731992189547 2024-11-19T04:56:59,821 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 0d8f3a4291bc438396db076c067c1110, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731992199610 2024-11-19T04:56:59,822 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 5cd4127b05a04ceebd3f3a98669980bc, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731992209690 2024-11-19T04:56:59,840 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): f3079176b64f8122eff94870174a66ff#info#compaction#43 average throughput is 3.08 MB/second, slept 0 time(s) and total 
slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T04:56:59,841 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/.tmp/info/ef8df5523cab47f29e724787c66b72d3 is 1080, key is row0001/info:/1731992189547/Put/seqid=0 2024-11-19T04:56:59,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741841_1017 (size=8296) 2024-11-19T04:56:59,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39585 is added to blk_1073741841_1017 (size=8296) 2024-11-19T04:56:59,853 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/.tmp/info/ef8df5523cab47f29e724787c66b72d3 as hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/info/ef8df5523cab47f29e724787c66b72d3 2024-11-19T04:56:59,860 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f3079176b64f8122eff94870174a66ff/info of f3079176b64f8122eff94870174a66ff into ef8df5523cab47f29e724787c66b72d3(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T04:56:59,860 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for f3079176b64f8122eff94870174a66ff: 2024-11-19T04:56:59,863 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C45383%2C1731992178519.1731992219863 2024-11-19T04:56:59,870 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:59,870 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:59,870 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:59,871 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:59,871 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:56:59,871 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/WALs/08a7f35e60d4,45383,1731992178519/08a7f35e60d4%2C45383%2C1731992178519.1731992209692 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/WALs/08a7f35e60d4,45383,1731992178519/08a7f35e60d4%2C45383%2C1731992178519.1731992219863 2024-11-19T04:56:59,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39585 is added to blk_1073741839_1015 (size=2520) 2024-11-19T04:56:59,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741839_1015 (size=2520) 2024-11-19T04:56:59,877 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34939:34939),(127.0.0.1/127.0.0.1:44689:44689)] 2024-11-19T04:56:59,884 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/WALs/08a7f35e60d4,45383,1731992178519/08a7f35e60d4%2C45383%2C1731992178519.1731992178904 to hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/oldWALs/08a7f35e60d4%2C45383%2C1731992178519.1731992178904 2024-11-19T04:56:59,886 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T04:56:59,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T04:56:59,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-19T04:56:59,888 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-19T04:56:59,890 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T04:56:59,890 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T04:57:00,044 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45383 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-19T04:57:00,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff. 
2024-11-19T04:57:00,044 INFO [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing f3079176b64f8122eff94870174a66ff 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-19T04:57:00,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/.tmp/info/3dbc98d7329c4bbebbe2194f6e932fb9 is 1080, key is row0000/info:/1731992219862/Put/seqid=0 2024-11-19T04:57:00,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39585 is added to blk_1073741843_1019 (size=6033) 2024-11-19T04:57:00,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741843_1019 (size=6033) 2024-11-19T04:57:00,055 INFO [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/.tmp/info/3dbc98d7329c4bbebbe2194f6e932fb9 2024-11-19T04:57:00,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/.tmp/info/3dbc98d7329c4bbebbe2194f6e932fb9 as hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/info/3dbc98d7329c4bbebbe2194f6e932fb9 2024-11-19T04:57:00,068 INFO [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/info/3dbc98d7329c4bbebbe2194f6e932fb9, entries=1, sequenceid=18, filesize=5.9 K 2024-11-19T04:57:00,069 INFO [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for f3079176b64f8122eff94870174a66ff in 25ms, sequenceid=18, compaction requested=false 2024-11-19T04:57:00,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for f3079176b64f8122eff94870174a66ff: 2024-11-19T04:57:00,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff. 
2024-11-19T04:57:00,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-19T04:57:00,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-19T04:57:00,075 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-19T04:57:00,075 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 182 msec 2024-11-19T04:57:00,077 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 190 msec 2024-11-19T04:57:00,144 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:57:00,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:01,144 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:01,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:57:02,145 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:02,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:03,145 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:57:03,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:04,146 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:04,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:57:04,797 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region f3079176b64f8122eff94870174a66ff, had cached 0 bytes from a total of 14329 2024-11-19T04:57:05,147 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:05,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:06,147 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:06,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:07,148 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:07,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:08,149 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:08,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:09,149 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:09,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:57:09,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40549 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-19T04:57:09,898 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-19T04:57:09,902 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C45383%2C1731992178519.1731992229902 2024-11-19T04:57:09,914 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:57:09,914 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:57:09,914 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:57:09,914 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:57:09,914 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:57:09,914 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/WALs/08a7f35e60d4,45383,1731992178519/08a7f35e60d4%2C45383%2C1731992178519.1731992219863 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/WALs/08a7f35e60d4,45383,1731992178519/08a7f35e60d4%2C45383%2C1731992178519.1731992229902 2024-11-19T04:57:09,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741842_1018 (size=2026) 2024-11-19T04:57:09,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39585 is added to blk_1073741842_1018 (size=2026) 2024-11-19T04:57:09,917 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/WALs/08a7f35e60d4,45383,1731992178519/08a7f35e60d4%2C45383%2C1731992178519.1731992209692 to hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/oldWALs/08a7f35e60d4%2C45383%2C1731992178519.1731992209692 2024-11-19T04:57:09,924 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44689:44689),(127.0.0.1/127.0.0.1:34939:34939)] 2024-11-19T04:57:09,925 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T04:57:09,925 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-19T04:57:09,925 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T04:57:09,925 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:57:09,925 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:57:09,925 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-19T04:57:09,925 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T04:57:09,926 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1112712780, stopped=false 2024-11-19T04:57:09,926 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=08a7f35e60d4,40549,1731992178470 2024-11-19T04:57:09,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1012e9539a60000, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T04:57:09,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1012e9539a60000, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:57:09,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45383-0x1012e9539a60001, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T04:57:09,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45383-0x1012e9539a60001, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:57:09,928 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T04:57:09,928 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-19T04:57:09,928 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T04:57:09,928 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:57:09,928 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '08a7f35e60d4,45383,1731992178519' ***** 2024-11-19T04:57:09,928 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T04:57:09,928 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40549-0x1012e9539a60000, quorum=127.0.0.1:53417, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T04:57:09,929 INFO [RS:0;08a7f35e60d4:45383 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T04:57:09,929 INFO [RS:0;08a7f35e60d4:45383 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T04:57:09,929 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T04:57:09,929 INFO [RS:0;08a7f35e60d4:45383 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T04:57:09,929 INFO [RS:0;08a7f35e60d4:45383 {}] regionserver.HRegionServer(3091): Received CLOSE for f3079176b64f8122eff94870174a66ff 2024-11-19T04:57:09,929 INFO [RS:0;08a7f35e60d4:45383 {}] regionserver.HRegionServer(959): stopping server 08a7f35e60d4,45383,1731992178519 2024-11-19T04:57:09,929 INFO [RS:0;08a7f35e60d4:45383 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T04:57:09,929 INFO [RS:0;08a7f35e60d4:45383 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;08a7f35e60d4:45383. 
2024-11-19T04:57:09,929 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45383-0x1012e9539a60001, quorum=127.0.0.1:53417, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T04:57:09,929 DEBUG [RS:0;08a7f35e60d4:45383 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T04:57:09,929 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing f3079176b64f8122eff94870174a66ff, disabling compactions & flushes 2024-11-19T04:57:09,929 DEBUG [RS:0;08a7f35e60d4:45383 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:57:09,929 INFO [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff. 2024-11-19T04:57:09,929 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff. 2024-11-19T04:57:09,929 INFO [RS:0;08a7f35e60d4:45383 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T04:57:09,930 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff. after waiting 0 ms 2024-11-19T04:57:09,930 INFO [RS:0;08a7f35e60d4:45383 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T04:57:09,930 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff. 2024-11-19T04:57:09,930 INFO [RS:0;08a7f35e60d4:45383 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-19T04:57:09,930 INFO [RS:0;08a7f35e60d4:45383 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T04:57:09,930 INFO [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing f3079176b64f8122eff94870174a66ff 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-19T04:57:09,930 INFO [RS:0;08a7f35e60d4:45383 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-19T04:57:09,930 DEBUG [RS:0;08a7f35e60d4:45383 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, f3079176b64f8122eff94870174a66ff=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff.} 2024-11-19T04:57:09,930 DEBUG [RS:0;08a7f35e60d4:45383 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, f3079176b64f8122eff94870174a66ff 2024-11-19T04:57:09,930 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T04:57:09,930 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T04:57:09,930 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T04:57:09,930 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T04:57:09,930 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T04:57:09,930 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-19T04:57:09,938 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/.tmp/info/27bef83bcc67418a9c798449bb0f5fb8 is 1080, key is row0001/info:/1731992229900/Put/seqid=0 2024-11-19T04:57:09,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741845_1021 (size=6033) 2024-11-19T04:57:09,949 INFO [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/.tmp/info/27bef83bcc67418a9c798449bb0f5fb8 2024-11-19T04:57:09,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39585 is added to blk_1073741845_1021 (size=6033) 2024-11-19T04:57:09,953 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/hbase/meta/1588230740/.tmp/info/42f2e03b2b2f4cc5a2ec7478cae3b8ce is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff./info:regioninfo/1731992179812/Put/seqid=0 2024-11-19T04:57:09,961 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/.tmp/info/27bef83bcc67418a9c798449bb0f5fb8 as hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/info/27bef83bcc67418a9c798449bb0f5fb8 2024-11-19T04:57:09,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39585 is added to blk_1073741846_1022 (size=7308) 2024-11-19T04:57:09,967 INFO [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/info/27bef83bcc67418a9c798449bb0f5fb8, entries=1, sequenceid=22, filesize=5.9 K 2024-11-19T04:57:09,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741846_1022 (size=7308) 2024-11-19T04:57:09,968 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/hbase/meta/1588230740/.tmp/info/42f2e03b2b2f4cc5a2ec7478cae3b8ce 2024-11-19T04:57:09,969 INFO [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for f3079176b64f8122eff94870174a66ff in 38ms, sequenceid=22, compaction requested=true 2024-11-19T04:57:09,969 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/info/7994b803dd0f42b5bd80c77a75f15fec, hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/info/0d8f3a4291bc438396db076c067c1110, hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/info/5cd4127b05a04ceebd3f3a98669980bc] to archive 2024-11-19T04:57:09,970 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-19T04:57:09,972 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/info/7994b803dd0f42b5bd80c77a75f15fec to hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/info/7994b803dd0f42b5bd80c77a75f15fec 2024-11-19T04:57:09,975 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/info/0d8f3a4291bc438396db076c067c1110 to hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/info/0d8f3a4291bc438396db076c067c1110 2024-11-19T04:57:09,977 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/info/5cd4127b05a04ceebd3f3a98669980bc to hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/info/5cd4127b05a04ceebd3f3a98669980bc 2024-11-19T04:57:09,977 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=08a7f35e60d4:40549 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-19T04:57:09,977 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [7994b803dd0f42b5bd80c77a75f15fec=6033, 0d8f3a4291bc438396db076c067c1110=6033, 5cd4127b05a04ceebd3f3a98669980bc=6033] 2024-11-19T04:57:09,982 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/f3079176b64f8122eff94870174a66ff/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-19T04:57:09,983 INFO [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff. 2024-11-19T04:57:09,983 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for f3079176b64f8122eff94870174a66ff: Waiting for close lock at 1731992229929Running coprocessor pre-close hooks at 1731992229929Disabling compacts and flushes for region at 1731992229929Disabling writes for close at 1731992229930 (+1 ms)Obtaining lock to block concurrent updates at 1731992229930Preparing flush snapshotting stores in f3079176b64f8122eff94870174a66ff at 1731992229930Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731992229930Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff. at 1731992229932 (+2 ms)Flushing f3079176b64f8122eff94870174a66ff/info: creating writer at 1731992229932Flushing f3079176b64f8122eff94870174a66ff/info: appending metadata at 1731992229937 (+5 ms)Flushing f3079176b64f8122eff94870174a66ff/info: closing flushed file at 1731992229937Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@32240184: reopening flushed file at 1731992229960 (+23 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for f3079176b64f8122eff94870174a66ff in 38ms, sequenceid=22, compaction requested=true at 1731992229969 (+9 ms)Writing region close event to WAL at 1731992229978 (+9 ms)Running coprocessor post-close hooks at 1731992229983 (+5 ms)Closed at 1731992229983 2024-11-19T04:57:09,983 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731992179453.f3079176b64f8122eff94870174a66ff. 
2024-11-19T04:57:09,990 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/hbase/meta/1588230740/.tmp/ns/e6c7cae246654565954cef82853bdc9e is 43, key is default/ns:d/1731992179354/Put/seqid=0 2024-11-19T04:57:09,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39585 is added to blk_1073741847_1023 (size=5153) 2024-11-19T04:57:09,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741847_1023 (size=5153) 2024-11-19T04:57:09,999 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/hbase/meta/1588230740/.tmp/ns/e6c7cae246654565954cef82853bdc9e 2024-11-19T04:57:10,022 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/hbase/meta/1588230740/.tmp/table/68364d0426ee44e49b7cd735566c7068 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731992179823/Put/seqid=0 2024-11-19T04:57:10,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741848_1024 (size=5508) 2024-11-19T04:57:10,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39585 is added to blk_1073741848_1024 (size=5508) 2024-11-19T04:57:10,032 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/hbase/meta/1588230740/.tmp/table/68364d0426ee44e49b7cd735566c7068 2024-11-19T04:57:10,038 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/hbase/meta/1588230740/.tmp/info/42f2e03b2b2f4cc5a2ec7478cae3b8ce as hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/hbase/meta/1588230740/info/42f2e03b2b2f4cc5a2ec7478cae3b8ce 2024-11-19T04:57:10,044 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/hbase/meta/1588230740/info/42f2e03b2b2f4cc5a2ec7478cae3b8ce, entries=10, sequenceid=11, filesize=7.1 K 2024-11-19T04:57:10,045 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/hbase/meta/1588230740/.tmp/ns/e6c7cae246654565954cef82853bdc9e as hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/hbase/meta/1588230740/ns/e6c7cae246654565954cef82853bdc9e 2024-11-19T04:57:10,051 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/hbase/meta/1588230740/ns/e6c7cae246654565954cef82853bdc9e, entries=2, sequenceid=11, filesize=5.0 K 2024-11-19T04:57:10,053 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/hbase/meta/1588230740/.tmp/table/68364d0426ee44e49b7cd735566c7068 as hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/hbase/meta/1588230740/table/68364d0426ee44e49b7cd735566c7068 2024-11-19T04:57:10,059 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/hbase/meta/1588230740/table/68364d0426ee44e49b7cd735566c7068, entries=2, sequenceid=11, filesize=5.4 K 2024-11-19T04:57:10,060 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 130ms, sequenceid=11, compaction requested=false 2024-11-19T04:57:10,065 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-19T04:57:10,066 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T04:57:10,066 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T04:57:10,066 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731992229930Running coprocessor pre-close hooks at 1731992229930Disabling compacts and flushes for region at 1731992229930Disabling writes for close at 1731992229930Obtaining lock to block concurrent updates at 1731992229931 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731992229931Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731992229931Flushing stores of hbase:meta,,1.1588230740 at 1731992229932 (+1 ms)Flushing 1588230740/info: creating writer at 1731992229932Flushing 1588230740/info: appending metadata at 1731992229952 (+20 ms)Flushing 1588230740/info: closing flushed file at 1731992229952Flushing 1588230740/ns: creating writer at 1731992229974 (+22 ms)Flushing 1588230740/ns: appending metadata at 1731992229990 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1731992229990Flushing 1588230740/table: creating writer at 1731992230004 (+14 ms)Flushing 1588230740/table: appending metadata at 1731992230021 (+17 ms)Flushing 1588230740/table: closing flushed file at 1731992230021Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5d485395: reopening flushed file at 1731992230037 (+16 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@43b50924: reopening flushed file at 1731992230044 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6831dfd7: reopening flushed file at 1731992230052 (+8 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 130ms, sequenceid=11, compaction requested=false at 1731992230060 (+8 ms)Writing region close event to WAL at 1731992230062 (+2 ms)Running coprocessor post-close hooks at 1731992230066 (+4 ms)Closed at 1731992230066 2024-11-19T04:57:10,066 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-19T04:57:10,130 INFO [RS:0;08a7f35e60d4:45383 {}] regionserver.HRegionServer(976): stopping server 08a7f35e60d4,45383,1731992178519; all regions closed. 2024-11-19T04:57:10,131 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:57:10,131 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:57:10,131 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:57:10,131 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:57:10,131 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:57:10,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39585 is added to blk_1073741834_1010 (size=3306) 2024-11-19T04:57:10,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741834_1010 (size=3306) 2024-11-19T04:57:10,140 DEBUG [RS:0;08a7f35e60d4:45383 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/oldWALs 2024-11-19T04:57:10,141 INFO [RS:0;08a7f35e60d4:45383 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 08a7f35e60d4%2C45383%2C1731992178519.meta:.meta(num 1731992179306) 2024-11-19T04:57:10,141 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:57:10,141 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:57:10,141 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:57:10,141 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:57:10,141 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:57:10,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39585 is added to blk_1073741844_1020 (size=1252) 2024-11-19T04:57:10,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741844_1020 (size=1252) 2024-11-19T04:57:10,148 DEBUG [RS:0;08a7f35e60d4:45383 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/oldWALs 2024-11-19T04:57:10,148 INFO [RS:0;08a7f35e60d4:45383 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 08a7f35e60d4%2C45383%2C1731992178519:(num 1731992229902) 2024-11-19T04:57:10,148 DEBUG [RS:0;08a7f35e60d4:45383 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:57:10,148 INFO [RS:0;08a7f35e60d4:45383 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T04:57:10,148 INFO [RS:0;08a7f35e60d4:45383 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T04:57:10,148 INFO [RS:0;08a7f35e60d4:45383 {}] hbase.ChoreService(370): Chore service for: regionserver/08a7f35e60d4:0 had 
[ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-19T04:57:10,148 INFO [RS:0;08a7f35e60d4:45383 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T04:57:10,148 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T04:57:10,149 INFO [RS:0;08a7f35e60d4:45383 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45383 2024-11-19T04:57:10,150 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:57:10,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45383-0x1012e9539a60001, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/08a7f35e60d4,45383,1731992178519 2024-11-19T04:57:10,152 INFO [RS:0;08a7f35e60d4:45383 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T04:57:10,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1012e9539a60000, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T04:57:10,153 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [08a7f35e60d4,45383,1731992178519] 2024-11-19T04:57:10,156 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/08a7f35e60d4,45383,1731992178519 already deleted, retry=false 2024-11-19T04:57:10,156 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 08a7f35e60d4,45383,1731992178519 expired; onlineServers=0 2024-11-19T04:57:10,156 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '08a7f35e60d4,40549,1731992178470' ***** 2024-11-19T04:57:10,156 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T04:57:10,157 INFO [M:0;08a7f35e60d4:40549 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T04:57:10,157 INFO [M:0;08a7f35e60d4:40549 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T04:57:10,157 DEBUG [M:0;08a7f35e60d4:40549 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T04:57:10,157 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-19T04:57:10,157 DEBUG [M:0;08a7f35e60d4:40549 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T04:57:10,157 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.large.0-1731992178703 {}] cleaner.HFileCleaner(306): Exit Thread[master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.large.0-1731992178703,5,FailOnTimeoutGroup] 2024-11-19T04:57:10,157 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.small.0-1731992178704 {}] cleaner.HFileCleaner(306): Exit Thread[master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.small.0-1731992178704,5,FailOnTimeoutGroup] 2024-11-19T04:57:10,157 INFO [M:0;08a7f35e60d4:40549 {}] hbase.ChoreService(370): Chore service for: master/08a7f35e60d4:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T04:57:10,157 INFO [M:0;08a7f35e60d4:40549 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T04:57:10,157 DEBUG [M:0;08a7f35e60d4:40549 {}] master.HMaster(1795): Stopping service threads 2024-11-19T04:57:10,157 INFO [M:0;08a7f35e60d4:40549 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T04:57:10,157 INFO [M:0;08a7f35e60d4:40549 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T04:57:10,158 INFO [M:0;08a7f35e60d4:40549 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T04:57:10,158 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-19T04:57:10,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1012e9539a60000, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T04:57:10,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1012e9539a60000, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:57:10,159 DEBUG [M:0;08a7f35e60d4:40549 {}] zookeeper.ZKUtil(347): master:40549-0x1012e9539a60000, quorum=127.0.0.1:53417, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T04:57:10,159 WARN [M:0;08a7f35e60d4:40549 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T04:57:10,160 INFO [M:0;08a7f35e60d4:40549 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/.lastflushedseqids 2024-11-19T04:57:10,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39585 is added to blk_1073741849_1025 (size=130) 2024-11-19T04:57:10,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741849_1025 (size=130) 2024-11-19T04:57:10,172 INFO [M:0;08a7f35e60d4:40549 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T04:57:10,172 INFO [M:0;08a7f35e60d4:40549 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T04:57:10,172 DEBUG [M:0;08a7f35e60d4:40549 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T04:57:10,172 INFO [M:0;08a7f35e60d4:40549 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:57:10,172 DEBUG [M:0;08a7f35e60d4:40549 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:57:10,172 DEBUG [M:0;08a7f35e60d4:40549 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T04:57:10,172 DEBUG [M:0;08a7f35e60d4:40549 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T04:57:10,173 INFO [M:0;08a7f35e60d4:40549 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.60 KB heapSize=55.01 KB 2024-11-19T04:57:10,192 DEBUG [M:0;08a7f35e60d4:40549 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/182fb5816b9f426eb8b256673a4f17d5 is 82, key is hbase:meta,,1/info:regioninfo/1731992179336/Put/seqid=0 2024-11-19T04:57:10,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741850_1026 (size=5672) 2024-11-19T04:57:10,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39585 is added to blk_1073741850_1026 (size=5672) 2024-11-19T04:57:10,199 INFO [M:0;08a7f35e60d4:40549 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/182fb5816b9f426eb8b256673a4f17d5 2024-11-19T04:57:10,221 DEBUG [M:0;08a7f35e60d4:40549 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/03a950d948e44410bd71779d3698b66a is 799, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731992179828/Put/seqid=0 2024-11-19T04:57:10,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39585 is added to blk_1073741851_1027 (size=7824) 2024-11-19T04:57:10,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741851_1027 (size=7824) 2024-11-19T04:57:10,228 INFO [M:0;08a7f35e60d4:40549 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=43.00 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/03a950d948e44410bd71779d3698b66a 2024-11-19T04:57:10,234 INFO [M:0;08a7f35e60d4:40549 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 03a950d948e44410bd71779d3698b66a 2024-11-19T04:57:10,252 DEBUG [M:0;08a7f35e60d4:40549 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/bc372884c70e456191b516c024190d2c is 69, key is 08a7f35e60d4,45383,1731992178519/rs:state/1731992178757/Put/seqid=0 2024-11-19T04:57:10,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45383-0x1012e9539a60001, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T04:57:10,254 INFO [RS:0;08a7f35e60d4:45383 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T04:57:10,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45383-0x1012e9539a60001, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T04:57:10,254 INFO [RS:0;08a7f35e60d4:45383 
{}] regionserver.HRegionServer(1031): Exiting; stopping=08a7f35e60d4,45383,1731992178519; zookeeper connection closed. 2024-11-19T04:57:10,254 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2c2f32df {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2c2f32df 2024-11-19T04:57:10,255 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-19T04:57:10,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741852_1028 (size=5156) 2024-11-19T04:57:10,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39585 is added to blk_1073741852_1028 (size=5156) 2024-11-19T04:57:10,260 INFO [M:0;08a7f35e60d4:40549 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/bc372884c70e456191b516c024190d2c 2024-11-19T04:57:10,287 DEBUG [M:0;08a7f35e60d4:40549 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f69b47f9f25a4b01879949ecef88384c is 52, key is load_balancer_on/state:d/1731992179449/Put/seqid=0 2024-11-19T04:57:10,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741853_1029 (size=5056) 2024-11-19T04:57:10,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39585 is added to blk_1073741853_1029 (size=5056) 2024-11-19T04:57:10,293 INFO [M:0;08a7f35e60d4:40549 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f69b47f9f25a4b01879949ecef88384c 2024-11-19T04:57:10,299 DEBUG [M:0;08a7f35e60d4:40549 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/182fb5816b9f426eb8b256673a4f17d5 as hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/182fb5816b9f426eb8b256673a4f17d5 2024-11-19T04:57:10,304 INFO [M:0;08a7f35e60d4:40549 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/182fb5816b9f426eb8b256673a4f17d5, entries=8, sequenceid=121, filesize=5.5 K 2024-11-19T04:57:10,305 DEBUG [M:0;08a7f35e60d4:40549 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/03a950d948e44410bd71779d3698b66a as hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/03a950d948e44410bd71779d3698b66a 
2024-11-19T04:57:10,310 INFO [M:0;08a7f35e60d4:40549 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 03a950d948e44410bd71779d3698b66a 2024-11-19T04:57:10,310 INFO [M:0;08a7f35e60d4:40549 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/03a950d948e44410bd71779d3698b66a, entries=14, sequenceid=121, filesize=7.6 K 2024-11-19T04:57:10,311 DEBUG [M:0;08a7f35e60d4:40549 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/bc372884c70e456191b516c024190d2c as hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/bc372884c70e456191b516c024190d2c 2024-11-19T04:57:10,316 INFO [M:0;08a7f35e60d4:40549 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/bc372884c70e456191b516c024190d2c, entries=1, sequenceid=121, filesize=5.0 K 2024-11-19T04:57:10,317 DEBUG [M:0;08a7f35e60d4:40549 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f69b47f9f25a4b01879949ecef88384c as hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f69b47f9f25a4b01879949ecef88384c 2024-11-19T04:57:10,322 INFO [M:0;08a7f35e60d4:40549 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42051/user/jenkins/test-data/98c34bd3-53ac-e16f-b6cd-947ed70d136e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f69b47f9f25a4b01879949ecef88384c, entries=1, sequenceid=121, filesize=4.9 K 2024-11-19T04:57:10,323 INFO [M:0;08a7f35e60d4:40549 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.60 KB/44650, heapSize ~54.95 KB/56264, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 151ms, sequenceid=121, compaction requested=false 2024-11-19T04:57:10,325 INFO [M:0;08a7f35e60d4:40549 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:57:10,325 DEBUG [M:0;08a7f35e60d4:40549 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731992230172Disabling compacts and flushes for region at 1731992230172Disabling writes for close at 1731992230172Obtaining lock to block concurrent updates at 1731992230173 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731992230173Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44650, getHeapSize=56264, getOffHeapSize=0, getCellsCount=140 at 1731992230173Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731992230174 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731992230174Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731992230192 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731992230192Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731992230204 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731992230220 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731992230220Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731992230234 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731992230251 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731992230251Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731992230266 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731992230286 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731992230286Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d865535: reopening flushed file at 1731992230298 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1301d5f6: reopening flushed file at 1731992230304 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5cde2a88: reopening flushed file at 1731992230310 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5e063d91: reopening flushed file at 1731992230316 (+6 ms)Finished flush of dataSize ~43.60 KB/44650, heapSize ~54.95 KB/56264, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 151ms, sequenceid=121, compaction requested=false at 1731992230323 (+7 ms)Writing region close event to WAL at 1731992230325 (+2 ms)Closed at 1731992230325 2024-11-19T04:57:10,326 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:57:10,326 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:57:10,326 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:57:10,326 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:57:10,326 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:57:10,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42689 is added to blk_1073741830_1006 (size=53047) 2024-11-19T04:57:10,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39585 is added to blk_1073741830_1006 (size=53047) 2024-11-19T04:57:10,329 INFO [M:0;08a7f35e60d4:40549 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-19T04:57:10,329 INFO [M:0;08a7f35e60d4:40549 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40549 2024-11-19T04:57:10,329 INFO [M:0;08a7f35e60d4:40549 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T04:57:10,330 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T04:57:10,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:57:10,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1012e9539a60000, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T04:57:10,432 INFO [M:0;08a7f35e60d4:40549 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T04:57:10,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40549-0x1012e9539a60000, quorum=127.0.0.1:53417, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T04:57:10,435 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@677a249b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:57:10,435 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e21aaf2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T04:57:10,435 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T04:57:10,436 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4732430a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T04:57:10,436 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e470e04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/hadoop.log.dir/,STOPPED} 2024-11-19T04:57:10,438 WARN [BP-417346895-172.17.0.2-1731992177654 heartbeating to localhost/127.0.0.1:42051 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T04:57:10,438 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T04:57:10,438 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T04:57:10,438 WARN [BP-417346895-172.17.0.2-1731992177654 heartbeating to localhost/127.0.0.1:42051 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-417346895-172.17.0.2-1731992177654 (Datanode Uuid 6b711a01-e1a6-4dbd-8f74-a527a7839e4e) service to localhost/127.0.0.1:42051 2024-11-19T04:57:10,439 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/cluster_ad7551c8-4c43-5df4-4828-94fc57682521/data/data3/current/BP-417346895-172.17.0.2-1731992177654 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:57:10,440 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/cluster_ad7551c8-4c43-5df4-4828-94fc57682521/data/data4/current/BP-417346895-172.17.0.2-1731992177654 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:57:10,440 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T04:57:10,445 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4114613b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:57:10,445 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@21edc7fd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T04:57:10,445 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T04:57:10,446 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@f02078{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T04:57:10,446 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@214c2124{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/hadoop.log.dir/,STOPPED} 2024-11-19T04:57:10,450 WARN [BP-417346895-172.17.0.2-1731992177654 heartbeating to localhost/127.0.0.1:42051 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T04:57:10,450 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T04:57:10,450 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T04:57:10,450 WARN [BP-417346895-172.17.0.2-1731992177654 heartbeating to localhost/127.0.0.1:42051 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-417346895-172.17.0.2-1731992177654 (Datanode Uuid 31e542b5-9bd7-4c78-9bf5-3902c20471cb) service to localhost/127.0.0.1:42051 2024-11-19T04:57:10,451 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/cluster_ad7551c8-4c43-5df4-4828-94fc57682521/data/data2/current/BP-417346895-172.17.0.2-1731992177654 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:57:10,451 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/cluster_ad7551c8-4c43-5df4-4828-94fc57682521/data/data1/current/BP-417346895-172.17.0.2-1731992177654 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:57:10,451 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T04:57:10,458 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@16208fe2{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T04:57:10,459 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4a5372a5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T04:57:10,459 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T04:57:10,459 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@438136f2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T04:57:10,459 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7a7d1da5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/hadoop.log.dir/,STOPPED} 2024-11-19T04:57:10,466 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T04:57:10,484 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T04:57:10,494 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=209 (was 181) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42051 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42051 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42051 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42051 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42051 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:42051 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42051 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/08a7f35e60d4:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42051 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42051 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=190 (was 190), ProcessCount=11 (was 11), AvailableMemoryMB=11276 (was 11549) 2024-11-19T04:57:10,503 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=209, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=190, ProcessCount=11, AvailableMemoryMB=11276 2024-11-19T04:57:10,504 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T04:57:10,504 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/hadoop.log.dir so I do NOT create it in target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c 2024-11-19T04:57:10,504 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/293124a3-037f-c4c1-f2ce-341cd2ecfc77/hadoop.tmp.dir so I do NOT create it in target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c 2024-11-19T04:57:10,504 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/cluster_de53dc38-4034-b207-c060-c6ceccacd599, deleteOnExit=true 2024-11-19T04:57:10,504 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T04:57:10,504 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/test.cache.data in system properties and HBase conf 2024-11-19T04:57:10,505 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T04:57:10,505 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/hadoop.log.dir in system properties and HBase conf 2024-11-19T04:57:10,505 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T04:57:10,505 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T04:57:10,505 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T04:57:10,505 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-19T04:57:10,505 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T04:57:10,505 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T04:57:10,505 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T04:57:10,505 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T04:57:10,505 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T04:57:10,505 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T04:57:10,505 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T04:57:10,505 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T04:57:10,505 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T04:57:10,505 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/nfs.dump.dir in system properties and HBase conf 2024-11-19T04:57:10,505 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/java.io.tmpdir in system properties and HBase conf 2024-11-19T04:57:10,505 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T04:57:10,506 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T04:57:10,506 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T04:57:10,521 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T04:57:10,594 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:57:10,599 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T04:57:10,601 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T04:57:10,601 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T04:57:10,601 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T04:57:10,602 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:57:10,602 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4cf57465{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/hadoop.log.dir/,AVAILABLE} 2024-11-19T04:57:10,602 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59703725{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T04:57:10,723 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@62c26df6{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/java.io.tmpdir/jetty-localhost-40063-hadoop-hdfs-3_4_1-tests_jar-_-any-6037228699527412006/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T04:57:10,723 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@8987cea{HTTP/1.1, (http/1.1)}{localhost:40063} 2024-11-19T04:57:10,723 INFO [Time-limited test {}] server.Server(415): Started @238768ms 2024-11-19T04:57:10,738 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T04:57:10,772 INFO [regionserver/08a7f35e60d4:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T04:57:10,843 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:57:10,847 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T04:57:10,848 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T04:57:10,848 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T04:57:10,848 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T04:57:10,849 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@66ce6cc0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/hadoop.log.dir/,AVAILABLE} 2024-11-19T04:57:10,849 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fc50460{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T04:57:10,990 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@79b422de{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/java.io.tmpdir/jetty-localhost-43177-hadoop-hdfs-3_4_1-tests_jar-_-any-16971799037307140442/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:57:10,990 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@73447fd2{HTTP/1.1, (http/1.1)}{localhost:43177} 2024-11-19T04:57:10,990 INFO [Time-limited test {}] server.Server(415): Started @239035ms 2024-11-19T04:57:10,992 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T04:57:11,051 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:57:11,055 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T04:57:11,059 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T04:57:11,059 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T04:57:11,060 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T04:57:11,061 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5096343{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/hadoop.log.dir/,AVAILABLE} 2024-11-19T04:57:11,061 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@53298b3d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T04:57:11,119 WARN [Thread-1945 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/cluster_de53dc38-4034-b207-c060-c6ceccacd599/data/data1/current/BP-486461787-172.17.0.2-1731992230528/current, will proceed with Du for space computation calculation, 2024-11-19T04:57:11,119 WARN [Thread-1946 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/cluster_de53dc38-4034-b207-c060-c6ceccacd599/data/data2/current/BP-486461787-172.17.0.2-1731992230528/current, will proceed with Du for space computation calculation, 2024-11-19T04:57:11,145 WARN [Thread-1924 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T04:57:11,168 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:11,171 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4647e8e1a1de82a9 with lease ID 0x3fc334a6f6860a8e: Processing first storage report for DS-84112210-6380-411b-9f85-856c80713dc1 from datanode DatanodeRegistration(127.0.0.1:39975, datanodeUuid=2e516bfa-2197-4fba-84b8-20b57429a12f, infoPort=36255, infoSecurePort=0, ipcPort=33389, storageInfo=lv=-57;cid=testClusterID;nsid=2047755448;c=1731992230528) 2024-11-19T04:57:11,171 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4647e8e1a1de82a9 with lease ID 0x3fc334a6f6860a8e: from storage DS-84112210-6380-411b-9f85-856c80713dc1 node DatanodeRegistration(127.0.0.1:39975, datanodeUuid=2e516bfa-2197-4fba-84b8-20b57429a12f, infoPort=36255, infoSecurePort=0, ipcPort=33389, storageInfo=lv=-57;cid=testClusterID;nsid=2047755448;c=1731992230528), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:57:11,171 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4647e8e1a1de82a9 with lease ID 0x3fc334a6f6860a8e: Processing first storage report for DS-d7739c41-bc1e-4c22-a126-63c888dbe6a4 from datanode DatanodeRegistration(127.0.0.1:39975, datanodeUuid=2e516bfa-2197-4fba-84b8-20b57429a12f, infoPort=36255, infoSecurePort=0, ipcPort=33389, storageInfo=lv=-57;cid=testClusterID;nsid=2047755448;c=1731992230528) 2024-11-19T04:57:11,171 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4647e8e1a1de82a9 with lease ID 0x3fc334a6f6860a8e: from storage DS-d7739c41-bc1e-4c22-a126-63c888dbe6a4 node DatanodeRegistration(127.0.0.1:39975, datanodeUuid=2e516bfa-2197-4fba-84b8-20b57429a12f, infoPort=36255, infoSecurePort=0, ipcPort=33389, storageInfo=lv=-57;cid=testClusterID;nsid=2047755448;c=1731992230528), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:57:11,209 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@652d6e37{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/java.io.tmpdir/jetty-localhost-37759-hadoop-hdfs-3_4_1-tests_jar-_-any-4627346786728942856/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:57:11,210 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@54fcac{HTTP/1.1, (http/1.1)}{localhost:37759} 2024-11-19T04:57:11,210 INFO [Time-limited test {}] server.Server(415): Started @239255ms 2024-11-19T04:57:11,211 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T04:57:11,317 WARN [Thread-1971 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/cluster_de53dc38-4034-b207-c060-c6ceccacd599/data/data3/current/BP-486461787-172.17.0.2-1731992230528/current, will proceed with Du for space computation calculation, 2024-11-19T04:57:11,317 WARN [Thread-1972 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/cluster_de53dc38-4034-b207-c060-c6ceccacd599/data/data4/current/BP-486461787-172.17.0.2-1731992230528/current, will proceed with Du for space computation calculation, 2024-11-19T04:57:11,338 WARN [Thread-1960 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T04:57:11,341 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb523c4dc43d479cd with lease ID 0x3fc334a6f6860a8f: Processing first storage report for DS-a1120f4b-c222-447e-a2c7-e620807c5016 from datanode DatanodeRegistration(127.0.0.1:36847, datanodeUuid=c7b5a726-e8a2-4f29-8687-82ac74f60bcd, infoPort=36015, infoSecurePort=0, ipcPort=41795, storageInfo=lv=-57;cid=testClusterID;nsid=2047755448;c=1731992230528) 2024-11-19T04:57:11,341 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb523c4dc43d479cd with lease ID 0x3fc334a6f6860a8f: from storage DS-a1120f4b-c222-447e-a2c7-e620807c5016 node DatanodeRegistration(127.0.0.1:36847, datanodeUuid=c7b5a726-e8a2-4f29-8687-82ac74f60bcd, infoPort=36015, infoSecurePort=0, ipcPort=41795, storageInfo=lv=-57;cid=testClusterID;nsid=2047755448;c=1731992230528), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:57:11,341 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb523c4dc43d479cd with lease ID 0x3fc334a6f6860a8f: Processing first storage report for DS-74a94e12-0daa-46b9-973f-bb40241cb7bf from datanode DatanodeRegistration(127.0.0.1:36847, datanodeUuid=c7b5a726-e8a2-4f29-8687-82ac74f60bcd, infoPort=36015, infoSecurePort=0, ipcPort=41795, storageInfo=lv=-57;cid=testClusterID;nsid=2047755448;c=1731992230528) 2024-11-19T04:57:11,341 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb523c4dc43d479cd with lease ID 0x3fc334a6f6860a8f: from storage DS-74a94e12-0daa-46b9-973f-bb40241cb7bf node DatanodeRegistration(127.0.0.1:36847, datanodeUuid=c7b5a726-e8a2-4f29-8687-82ac74f60bcd, infoPort=36015, infoSecurePort=0, ipcPort=41795, storageInfo=lv=-57;cid=testClusterID;nsid=2047755448;c=1731992230528), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:57:11,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:57:11,440 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c 2024-11-19T04:57:11,443 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/cluster_de53dc38-4034-b207-c060-c6ceccacd599/zookeeper_0, clientPort=54523, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/cluster_de53dc38-4034-b207-c060-c6ceccacd599/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/cluster_de53dc38-4034-b207-c060-c6ceccacd599/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T04:57:11,444 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54523 2024-11-19T04:57:11,444 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:57:11,445 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:57:11,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741825_1001 (size=7) 2024-11-19T04:57:11,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741825_1001 (size=7) 2024-11-19T04:57:11,456 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d with version=8 2024-11-19T04:57:11,456 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/hbase-staging 2024-11-19T04:57:11,458 INFO [Time-limited test {}] client.ConnectionUtils(128): master/08a7f35e60d4:0 server-side Connection retries=45 2024-11-19T04:57:11,458 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T04:57:11,458 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T04:57:11,458 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T04:57:11,458 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T04:57:11,458 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T04:57:11,458 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T04:57:11,458 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T04:57:11,460 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35671 2024-11-19T04:57:11,461 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35671 connecting to ZooKeeper ensemble=127.0.0.1:54523 2024-11-19T04:57:11,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:356710x0, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T04:57:11,468 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35671-0x1012e9608a20000 connected 2024-11-19T04:57:11,485 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:57:11,487 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:57:11,489 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35671-0x1012e9608a20000, quorum=127.0.0.1:54523, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T04:57:11,489 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d, hbase.cluster.distributed=false 2024-11-19T04:57:11,491 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35671-0x1012e9608a20000, quorum=127.0.0.1:54523, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T04:57:11,491 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35671 2024-11-19T04:57:11,492 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35671 2024-11-19T04:57:11,492 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35671 2024-11-19T04:57:11,492 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35671 2024-11-19T04:57:11,493 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35671 2024-11-19T04:57:11,509 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/08a7f35e60d4:0 server-side Connection retries=45 2024-11-19T04:57:11,509 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T04:57:11,509 INFO 
[Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T04:57:11,509 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T04:57:11,509 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T04:57:11,509 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T04:57:11,509 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T04:57:11,509 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T04:57:11,510 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42609 2024-11-19T04:57:11,511 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42609 connecting to ZooKeeper ensemble=127.0.0.1:54523 2024-11-19T04:57:11,512 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:57:11,514 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:57:11,518 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:426090x0, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T04:57:11,518 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:426090x0, quorum=127.0.0.1:54523, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T04:57:11,519 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42609-0x1012e9608a20001 connected 2024-11-19T04:57:11,519 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T04:57:11,519 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T04:57:11,520 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42609-0x1012e9608a20001, quorum=127.0.0.1:54523, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T04:57:11,521 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42609-0x1012e9608a20001, quorum=127.0.0.1:54523, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T04:57:11,522 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42609 2024-11-19T04:57:11,522 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, 
numCallQueues=1, port=42609 2024-11-19T04:57:11,522 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42609 2024-11-19T04:57:11,522 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42609 2024-11-19T04:57:11,522 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42609 2024-11-19T04:57:11,536 DEBUG [M:0;08a7f35e60d4:35671 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;08a7f35e60d4:35671 2024-11-19T04:57:11,536 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/08a7f35e60d4,35671,1731992231458 2024-11-19T04:57:11,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35671-0x1012e9608a20000, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T04:57:11,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42609-0x1012e9608a20001, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T04:57:11,538 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35671-0x1012e9608a20000, quorum=127.0.0.1:54523, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/08a7f35e60d4,35671,1731992231458 2024-11-19T04:57:11,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42609-0x1012e9608a20001, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T04:57:11,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42609-0x1012e9608a20001, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:57:11,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35671-0x1012e9608a20000, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:57:11,541 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35671-0x1012e9608a20000, quorum=127.0.0.1:54523, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T04:57:11,541 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/08a7f35e60d4,35671,1731992231458 from backup master directory 2024-11-19T04:57:11,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35671-0x1012e9608a20000, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/08a7f35e60d4,35671,1731992231458 2024-11-19T04:57:11,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42609-0x1012e9608a20001, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T04:57:11,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35671-0x1012e9608a20000, quorum=127.0.0.1:54523, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T04:57:11,544 WARN [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T04:57:11,544 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=08a7f35e60d4,35671,1731992231458 2024-11-19T04:57:11,549 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/hbase.id] with ID: 292891e3-4114-4b30-978a-04e02560c719 2024-11-19T04:57:11,549 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/.tmp/hbase.id 2024-11-19T04:57:11,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741826_1002 (size=42) 2024-11-19T04:57:11,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741826_1002 (size=42) 2024-11-19T04:57:11,556 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/.tmp/hbase.id]:[hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/hbase.id] 2024-11-19T04:57:11,568 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:57:11,568 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-19T04:57:11,570 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-19T04:57:11,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42609-0x1012e9608a20001, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:57:11,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35671-0x1012e9608a20000, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:57:11,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741827_1003 (size=196) 2024-11-19T04:57:11,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741827_1003 (size=196) 2024-11-19T04:57:11,579 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T04:57:11,580 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T04:57:11,580 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T04:57:11,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741828_1004 (size=1189) 2024-11-19T04:57:11,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741828_1004 (size=1189) 2024-11-19T04:57:11,593 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/MasterData/data/master/store 2024-11-19T04:57:11,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741829_1005 (size=34) 2024-11-19T04:57:11,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741829_1005 (size=34) 2024-11-19T04:57:11,601 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:57:11,601 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T04:57:11,601 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:57:11,601 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:57:11,601 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T04:57:11,601 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:57:11,601 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T04:57:11,601 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731992231601Disabling compacts and flushes for region at 1731992231601Disabling writes for close at 1731992231601Writing region close event to WAL at 1731992231601Closed at 1731992231601 2024-11-19T04:57:11,603 WARN [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/MasterData/data/master/store/.initializing 2024-11-19T04:57:11,603 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/MasterData/WALs/08a7f35e60d4,35671,1731992231458 2024-11-19T04:57:11,607 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=08a7f35e60d4%2C35671%2C1731992231458, suffix=, logDir=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/MasterData/WALs/08a7f35e60d4,35671,1731992231458, archiveDir=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/MasterData/oldWALs, maxLogs=10 2024-11-19T04:57:11,608 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C35671%2C1731992231458.1731992231607 2024-11-19T04:57:11,615 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/MasterData/WALs/08a7f35e60d4,35671,1731992231458/08a7f35e60d4%2C35671%2C1731992231458.1731992231607 2024-11-19T04:57:11,621 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36015:36015),(127.0.0.1/127.0.0.1:36255:36255)] 2024-11-19T04:57:11,624 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T04:57:11,624 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:57:11,625 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:57:11,625 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:57:11,628 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:57:11,630 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T04:57:11,630 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:57:11,631 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:57:11,631 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:57:11,632 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T04:57:11,632 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:57:11,632 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T04:57:11,632 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:57:11,633 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T04:57:11,634 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:57:11,634 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T04:57:11,634 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:57:11,635 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T04:57:11,635 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:57:11,636 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T04:57:11,636 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:57:11,637 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:57:11,637 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:57:11,638 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:57:11,638 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:57:11,639 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T04:57:11,640 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:57:11,645 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T04:57:11,646 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=775351, jitterRate=-0.014091357588768005}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T04:57:11,646 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731992231625Initializing all the Stores at 1731992231626 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992231626Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992231628 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992231628Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992231628Cleaning up temporary data from old regions at 1731992231639 (+11 ms)Region opened successfully at 1731992231646 (+7 ms) 2024-11-19T04:57:11,647 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T04:57:11,650 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6865326d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=08a7f35e60d4/172.17.0.2:0 2024-11-19T04:57:11,651 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T04:57:11,651 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T04:57:11,652 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T04:57:11,652 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T04:57:11,652 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-19T04:57:11,653 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-19T04:57:11,653 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T04:57:11,656 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T04:57:11,658 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35671-0x1012e9608a20000, quorum=127.0.0.1:54523, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T04:57:11,659 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T04:57:11,659 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T04:57:11,660 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35671-0x1012e9608a20000, quorum=127.0.0.1:54523, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T04:57:11,661 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T04:57:11,661 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T04:57:11,662 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35671-0x1012e9608a20000, quorum=127.0.0.1:54523, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T04:57:11,665 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T04:57:11,666 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35671-0x1012e9608a20000, quorum=127.0.0.1:54523, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T04:57:11,668 DEBUG 
[master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T04:57:11,670 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35671-0x1012e9608a20000, quorum=127.0.0.1:54523, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T04:57:11,671 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T04:57:11,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42609-0x1012e9608a20001, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T04:57:11,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35671-0x1012e9608a20000, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T04:57:11,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35671-0x1012e9608a20000, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:57:11,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42609-0x1012e9608a20001, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:57:11,673 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=08a7f35e60d4,35671,1731992231458, sessionid=0x1012e9608a20000, setting cluster-up flag (Was=false) 2024-11-19T04:57:11,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42609-0x1012e9608a20001, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:57:11,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35671-0x1012e9608a20000, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:57:11,684 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T04:57:11,685 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=08a7f35e60d4,35671,1731992231458 2024-11-19T04:57:11,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35671-0x1012e9608a20000, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:57:11,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42609-0x1012e9608a20001, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:57:11,693 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T04:57:11,694 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=08a7f35e60d4,35671,1731992231458 2024-11-19T04:57:11,696 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T04:57:11,697 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T04:57:11,698 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T04:57:11,698 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-19T04:57:11,698 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 08a7f35e60d4,35671,1731992231458 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T04:57:11,699 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/08a7f35e60d4:0, corePoolSize=5, maxPoolSize=5 2024-11-19T04:57:11,699 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/08a7f35e60d4:0, corePoolSize=5, maxPoolSize=5 2024-11-19T04:57:11,699 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=5, maxPoolSize=5 2024-11-19T04:57:11,700 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=5, maxPoolSize=5 2024-11-19T04:57:11,700 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/08a7f35e60d4:0, corePoolSize=10, maxPoolSize=10 2024-11-19T04:57:11,700 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:57:11,700 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=2, maxPoolSize=2 2024-11-19T04:57:11,700 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=1, 
maxPoolSize=1 2024-11-19T04:57:11,702 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T04:57:11,702 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T04:57:11,702 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731992261702 2024-11-19T04:57:11,702 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T04:57:11,702 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T04:57:11,702 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T04:57:11,702 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T04:57:11,702 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T04:57:11,702 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T04:57:11,703 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-19T04:57:11,703 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:57:11,703 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T04:57:11,703 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T04:57:11,703 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T04:57:11,703 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T04:57:11,703 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T04:57:11,704 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T04:57:11,704 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.large.0-1731992231704,5,FailOnTimeoutGroup] 2024-11-19T04:57:11,708 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.small.0-1731992231704,5,FailOnTimeoutGroup] 2024-11-19T04:57:11,708 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T04:57:11,708 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T04:57:11,708 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T04:57:11,708 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-19T04:57:11,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741831_1007 (size=1321) 2024-11-19T04:57:11,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741831_1007 (size=1321) 2024-11-19T04:57:11,719 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T04:57:11,719 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d 2024-11-19T04:57:11,724 INFO [RS:0;08a7f35e60d4:42609 {}] regionserver.HRegionServer(746): ClusterId : 292891e3-4114-4b30-978a-04e02560c719 2024-11-19T04:57:11,724 DEBUG [RS:0;08a7f35e60d4:42609 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T04:57:11,727 DEBUG [RS:0;08a7f35e60d4:42609 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T04:57:11,727 DEBUG [RS:0;08a7f35e60d4:42609 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T04:57:11,729 DEBUG [RS:0;08a7f35e60d4:42609 {}] procedure.RegionServerProcedureManagerHost(45): 
Procedure online-snapshot initialized 2024-11-19T04:57:11,729 DEBUG [RS:0;08a7f35e60d4:42609 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@295700e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=08a7f35e60d4/172.17.0.2:0 2024-11-19T04:57:11,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741832_1008 (size=32) 2024-11-19T04:57:11,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741832_1008 (size=32) 2024-11-19T04:57:11,735 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:57:11,737 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T04:57:11,738 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T04:57:11,738 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:57:11,739 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:57:11,739 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T04:57:11,740 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T04:57:11,740 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:57:11,740 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:57:11,740 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T04:57:11,742 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T04:57:11,742 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:57:11,743 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:57:11,743 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T04:57:11,744 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T04:57:11,744 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:57:11,744 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): 
Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:57:11,745 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T04:57:11,745 DEBUG [RS:0;08a7f35e60d4:42609 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;08a7f35e60d4:42609 2024-11-19T04:57:11,745 INFO [RS:0;08a7f35e60d4:42609 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T04:57:11,745 INFO [RS:0;08a7f35e60d4:42609 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T04:57:11,745 DEBUG [RS:0;08a7f35e60d4:42609 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-19T04:57:11,745 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/hbase/meta/1588230740 2024-11-19T04:57:11,746 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/hbase/meta/1588230740 2024-11-19T04:57:11,746 INFO [RS:0;08a7f35e60d4:42609 {}] regionserver.HRegionServer(2659): reportForDuty to master=08a7f35e60d4,35671,1731992231458 with port=42609, startcode=1731992231509 2024-11-19T04:57:11,746 DEBUG [RS:0;08a7f35e60d4:42609 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T04:57:11,747 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T04:57:11,747 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T04:57:11,748 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-19T04:57:11,749 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46313, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T04:57:11,749 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T04:57:11,749 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35671 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 08a7f35e60d4,42609,1731992231509 2024-11-19T04:57:11,749 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35671 {}] master.ServerManager(517): Registering regionserver=08a7f35e60d4,42609,1731992231509 2024-11-19T04:57:11,751 DEBUG [RS:0;08a7f35e60d4:42609 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d 2024-11-19T04:57:11,751 DEBUG [RS:0;08a7f35e60d4:42609 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40207 2024-11-19T04:57:11,751 DEBUG [RS:0;08a7f35e60d4:42609 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T04:57:11,752 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T04:57:11,752 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=768924, jitterRate=-0.022263303399086}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T04:57:11,753 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731992231736Initializing all the Stores at 1731992231736Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992231736Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992231737 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992231737Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992231737Cleaning up 
temporary data from old regions at 1731992231747 (+10 ms)Region opened successfully at 1731992231753 (+6 ms) 2024-11-19T04:57:11,753 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T04:57:11,753 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T04:57:11,753 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T04:57:11,753 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T04:57:11,753 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T04:57:11,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35671-0x1012e9608a20000, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T04:57:11,755 DEBUG [RS:0;08a7f35e60d4:42609 {}] zookeeper.ZKUtil(111): regionserver:42609-0x1012e9608a20001, quorum=127.0.0.1:54523, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/08a7f35e60d4,42609,1731992231509 2024-11-19T04:57:11,755 WARN [RS:0;08a7f35e60d4:42609 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T04:57:11,755 INFO [RS:0;08a7f35e60d4:42609 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T04:57:11,755 DEBUG [RS:0;08a7f35e60d4:42609 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/WALs/08a7f35e60d4,42609,1731992231509 2024-11-19T04:57:11,761 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T04:57:11,761 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731992231753Disabling compacts and flushes for region at 1731992231753Disabling writes for close at 1731992231753Writing region close event to WAL at 1731992231760 (+7 ms)Closed at 1731992231760 2024-11-19T04:57:11,761 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [08a7f35e60d4,42609,1731992231509] 2024-11-19T04:57:11,762 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T04:57:11,762 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T04:57:11,762 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T04:57:11,764 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T04:57:11,765 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, 
retain=false 2024-11-19T04:57:11,765 INFO [RS:0;08a7f35e60d4:42609 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T04:57:11,767 INFO [RS:0;08a7f35e60d4:42609 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T04:57:11,767 INFO [RS:0;08a7f35e60d4:42609 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T04:57:11,767 INFO [RS:0;08a7f35e60d4:42609 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T04:57:11,768 INFO [RS:0;08a7f35e60d4:42609 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T04:57:11,768 INFO [RS:0;08a7f35e60d4:42609 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T04:57:11,768 INFO [RS:0;08a7f35e60d4:42609 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T04:57:11,769 DEBUG [RS:0;08a7f35e60d4:42609 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:57:11,769 DEBUG [RS:0;08a7f35e60d4:42609 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:57:11,769 DEBUG [RS:0;08a7f35e60d4:42609 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:57:11,769 DEBUG [RS:0;08a7f35e60d4:42609 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:57:11,769 DEBUG [RS:0;08a7f35e60d4:42609 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:57:11,769 DEBUG [RS:0;08a7f35e60d4:42609 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/08a7f35e60d4:0, corePoolSize=2, maxPoolSize=2 2024-11-19T04:57:11,769 DEBUG [RS:0;08a7f35e60d4:42609 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:57:11,769 DEBUG [RS:0;08a7f35e60d4:42609 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:57:11,769 DEBUG [RS:0;08a7f35e60d4:42609 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:57:11,769 DEBUG [RS:0;08a7f35e60d4:42609 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:57:11,769 DEBUG [RS:0;08a7f35e60d4:42609 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:57:11,769 DEBUG [RS:0;08a7f35e60d4:42609 {}] executor.ExecutorService(95): Starting executor service 
name=RS_CLAIM_REPLICATION_QUEUE-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:57:11,769 DEBUG [RS:0;08a7f35e60d4:42609 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/08a7f35e60d4:0, corePoolSize=3, maxPoolSize=3 2024-11-19T04:57:11,769 DEBUG [RS:0;08a7f35e60d4:42609 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0, corePoolSize=3, maxPoolSize=3 2024-11-19T04:57:11,772 INFO [RS:0;08a7f35e60d4:42609 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T04:57:11,772 INFO [RS:0;08a7f35e60d4:42609 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T04:57:11,772 INFO [RS:0;08a7f35e60d4:42609 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T04:57:11,772 INFO [RS:0;08a7f35e60d4:42609 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T04:57:11,772 INFO [RS:0;08a7f35e60d4:42609 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T04:57:11,772 INFO [RS:0;08a7f35e60d4:42609 {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,42609,1731992231509-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T04:57:11,788 INFO [RS:0;08a7f35e60d4:42609 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T04:57:11,788 INFO [RS:0;08a7f35e60d4:42609 {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,42609,1731992231509-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T04:57:11,788 INFO [RS:0;08a7f35e60d4:42609 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:57:11,788 INFO [RS:0;08a7f35e60d4:42609 {}] regionserver.Replication(171): 08a7f35e60d4,42609,1731992231509 started 2024-11-19T04:57:11,803 INFO [RS:0;08a7f35e60d4:42609 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T04:57:11,803 INFO [RS:0;08a7f35e60d4:42609 {}] regionserver.HRegionServer(1482): Serving as 08a7f35e60d4,42609,1731992231509, RpcServer on 08a7f35e60d4/172.17.0.2:42609, sessionid=0x1012e9608a20001 2024-11-19T04:57:11,804 DEBUG [RS:0;08a7f35e60d4:42609 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T04:57:11,804 DEBUG [RS:0;08a7f35e60d4:42609 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 08a7f35e60d4,42609,1731992231509 2024-11-19T04:57:11,804 DEBUG [RS:0;08a7f35e60d4:42609 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '08a7f35e60d4,42609,1731992231509' 2024-11-19T04:57:11,804 DEBUG [RS:0;08a7f35e60d4:42609 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T04:57:11,804 DEBUG [RS:0;08a7f35e60d4:42609 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T04:57:11,805 DEBUG [RS:0;08a7f35e60d4:42609 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T04:57:11,805 DEBUG [RS:0;08a7f35e60d4:42609 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T04:57:11,805 DEBUG [RS:0;08a7f35e60d4:42609 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 08a7f35e60d4,42609,1731992231509 2024-11-19T04:57:11,805 DEBUG [RS:0;08a7f35e60d4:42609 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '08a7f35e60d4,42609,1731992231509' 2024-11-19T04:57:11,805 DEBUG [RS:0;08a7f35e60d4:42609 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T04:57:11,805 DEBUG [RS:0;08a7f35e60d4:42609 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T04:57:11,806 DEBUG [RS:0;08a7f35e60d4:42609 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T04:57:11,806 INFO [RS:0;08a7f35e60d4:42609 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T04:57:11,806 INFO [RS:0;08a7f35e60d4:42609 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-19T04:57:11,908 INFO [RS:0;08a7f35e60d4:42609 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=08a7f35e60d4%2C42609%2C1731992231509, suffix=, logDir=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/WALs/08a7f35e60d4,42609,1731992231509, archiveDir=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/oldWALs, maxLogs=32 2024-11-19T04:57:11,908 INFO [RS:0;08a7f35e60d4:42609 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C42609%2C1731992231509.1731992231908 2024-11-19T04:57:11,914 INFO [RS:0;08a7f35e60d4:42609 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/WALs/08a7f35e60d4,42609,1731992231509/08a7f35e60d4%2C42609%2C1731992231509.1731992231908 2024-11-19T04:57:11,915 DEBUG [RS:0;08a7f35e60d4:42609 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36255:36255),(127.0.0.1/127.0.0.1:36015:36015)] 2024-11-19T04:57:11,915 WARN [08a7f35e60d4:35671 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-19T04:57:12,166 DEBUG [08a7f35e60d4:35671 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T04:57:12,166 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=08a7f35e60d4,42609,1731992231509 2024-11-19T04:57:12,168 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 08a7f35e60d4,42609,1731992231509, state=OPENING 2024-11-19T04:57:12,168 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:12,169 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T04:57:12,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35671-0x1012e9608a20000, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:57:12,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42609-0x1012e9608a20001, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:57:12,171 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T04:57:12,171 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T04:57:12,171 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T04:57:12,171 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=08a7f35e60d4,42609,1731992231509}] 2024-11-19T04:57:12,324 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T04:57:12,326 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52471, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T04:57:12,330 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T04:57:12,330 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T04:57:12,331 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=08a7f35e60d4%2C42609%2C1731992231509.meta, suffix=.meta, logDir=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/WALs/08a7f35e60d4,42609,1731992231509, archiveDir=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/oldWALs, maxLogs=32 2024-11-19T04:57:12,332 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C42609%2C1731992231509.meta.1731992232331.meta 2024-11-19T04:57:12,336 INFO 
[RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/WALs/08a7f35e60d4,42609,1731992231509/08a7f35e60d4%2C42609%2C1731992231509.meta.1731992232331.meta 2024-11-19T04:57:12,338 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36255:36255),(127.0.0.1/127.0.0.1:36015:36015)] 2024-11-19T04:57:12,340 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T04:57:12,341 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T04:57:12,341 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T04:57:12,341 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-19T04:57:12,341 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T04:57:12,341 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:57:12,341 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T04:57:12,341 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T04:57:12,342 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T04:57:12,343 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T04:57:12,343 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:57:12,343 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:57:12,343 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T04:57:12,344 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T04:57:12,344 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:57:12,344 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:57:12,344 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T04:57:12,345 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T04:57:12,345 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:57:12,345 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:57:12,345 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T04:57:12,346 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T04:57:12,346 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:57:12,346 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:57:12,346 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T04:57:12,347 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/hbase/meta/1588230740 2024-11-19T04:57:12,348 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/hbase/meta/1588230740 2024-11-19T04:57:12,349 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T04:57:12,349 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T04:57:12,349 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-19T04:57:12,350 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T04:57:12,351 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=747404, jitterRate=-0.04962716996669769}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T04:57:12,351 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T04:57:12,351 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731992232341Writing region info on filesystem at 1731992232341Initializing all the Stores at 1731992232342 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992232342Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992232342Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992232342Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992232342Cleaning up temporary data from old regions at 1731992232349 (+7 ms)Running coprocessor post-open hooks at 1731992232351 (+2 ms)Region opened successfully at 1731992232351 2024-11-19T04:57:12,352 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731992232324 2024-11-19T04:57:12,355 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T04:57:12,355 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T04:57:12,356 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=08a7f35e60d4,42609,1731992231509 2024-11-19T04:57:12,357 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 08a7f35e60d4,42609,1731992231509, state=OPEN 2024-11-19T04:57:12,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42609-0x1012e9608a20001, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T04:57:12,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35671-0x1012e9608a20000, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T04:57:12,365 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=08a7f35e60d4,42609,1731992231509 2024-11-19T04:57:12,365 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T04:57:12,365 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T04:57:12,367 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T04:57:12,367 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=08a7f35e60d4,42609,1731992231509 in 194 msec 2024-11-19T04:57:12,370 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T04:57:12,370 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 605 msec 2024-11-19T04:57:12,370 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T04:57:12,370 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T04:57:12,372 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T04:57:12,372 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=08a7f35e60d4,42609,1731992231509, seqNum=-1] 2024-11-19T04:57:12,372 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T04:57:12,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:12,373 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54947, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T04:57:12,378 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 680 msec 2024-11-19T04:57:12,378 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731992232378, completionTime=-1 2024-11-19T04:57:12,378 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T04:57:12,378 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-11-19T04:57:12,380 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-19T04:57:12,380 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731992292380 2024-11-19T04:57:12,380 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731992352380 2024-11-19T04:57:12,380 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-19T04:57:12,381 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,35671,1731992231458-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T04:57:12,381 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,35671,1731992231458-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:57:12,381 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,35671,1731992231458-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:57:12,381 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-08a7f35e60d4:35671, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:57:12,381 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T04:57:12,381 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T04:57:12,383 DEBUG [master/08a7f35e60d4:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T04:57:12,385 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.841sec 2024-11-19T04:57:12,385 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T04:57:12,385 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T04:57:12,385 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T04:57:12,385 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-19T04:57:12,385 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T04:57:12,385 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,35671,1731992231458-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-11-19T04:57:12,385 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,35671,1731992231458-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T04:57:12,388 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T04:57:12,388 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T04:57:12,388 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,35671,1731992231458-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:57:12,425 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25f949b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T04:57:12,425 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 08a7f35e60d4,35671,-1 for getting cluster id 2024-11-19T04:57:12,425 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T04:57:12,426 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '292891e3-4114-4b30-978a-04e02560c719' 2024-11-19T04:57:12,427 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T04:57:12,427 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "292891e3-4114-4b30-978a-04e02560c719" 2024-11-19T04:57:12,427 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31d57e88, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T04:57:12,427 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [08a7f35e60d4,35671,-1] 2024-11-19T04:57:12,427 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T04:57:12,427 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:57:12,428 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40108, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T04:57:12,429 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29765213, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T04:57:12,429 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T04:57:12,430 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is 
[region=hbase:meta,,1.1588230740, hostname=08a7f35e60d4,42609,1731992231509, seqNum=-1] 2024-11-19T04:57:12,430 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T04:57:12,431 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40418, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T04:57:12,433 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=08a7f35e60d4,35671,1731992231458 2024-11-19T04:57:12,433 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:57:12,435 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T04:57:12,436 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-19T04:57:12,437 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 08a7f35e60d4,35671,1731992231458 2024-11-19T04:57:12,437 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3d63bf95 2024-11-19T04:57:12,437 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-19T04:57:12,438 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40122, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-19T04:57:12,438 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35671 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-19T04:57:12,438 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35671 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-19T04:57:12,438 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35671 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T04:57:12,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35671 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-19T04:57:12,441 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T04:57:12,441 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:57:12,441 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35671 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-19T04:57:12,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35671 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T04:57:12,442 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T04:57:12,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741835_1011 (size=381) 2024-11-19T04:57:12,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741835_1011 (size=381) 2024-11-19T04:57:12,451 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 561a52b828b94f12e63c17503cb73505, NAME => 'TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d 2024-11-19T04:57:12,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741836_1012 (size=64) 2024-11-19T04:57:12,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741836_1012 (size=64) 2024-11-19T04:57:12,458 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:57:12,458 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 561a52b828b94f12e63c17503cb73505, disabling compactions & flushes 2024-11-19T04:57:12,458 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505. 2024-11-19T04:57:12,458 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505. 2024-11-19T04:57:12,458 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505. after waiting 0 ms 2024-11-19T04:57:12,458 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505. 2024-11-19T04:57:12,458 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505. 2024-11-19T04:57:12,458 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 561a52b828b94f12e63c17503cb73505: Waiting for close lock at 1731992232458Disabling compacts and flushes for region at 1731992232458Disabling writes for close at 1731992232458Writing region close event to WAL at 1731992232458Closed at 1731992232458 2024-11-19T04:57:12,460 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T04:57:12,460 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731992232460"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731992232460"}]},"ts":"1731992232460"} 2024-11-19T04:57:12,462 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-19T04:57:12,463 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T04:57:12,463 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731992232463"}]},"ts":"1731992232463"} 2024-11-19T04:57:12,465 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-19T04:57:12,466 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=561a52b828b94f12e63c17503cb73505, ASSIGN}] 2024-11-19T04:57:12,467 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=561a52b828b94f12e63c17503cb73505, ASSIGN 2024-11-19T04:57:12,468 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=561a52b828b94f12e63c17503cb73505, ASSIGN; state=OFFLINE, location=08a7f35e60d4,42609,1731992231509; forceNewPlan=false, retain=false 2024-11-19T04:57:12,618 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=561a52b828b94f12e63c17503cb73505, regionState=OPENING, regionLocation=08a7f35e60d4,42609,1731992231509 2024-11-19T04:57:12,621 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=561a52b828b94f12e63c17503cb73505, ASSIGN because future has completed 2024-11-19T04:57:12,622 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 561a52b828b94f12e63c17503cb73505, server=08a7f35e60d4,42609,1731992231509}] 2024-11-19T04:57:12,780 INFO [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505. 
2024-11-19T04:57:12,780 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 561a52b828b94f12e63c17503cb73505, NAME => 'TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505.', STARTKEY => '', ENDKEY => ''} 2024-11-19T04:57:12,780 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 561a52b828b94f12e63c17503cb73505 2024-11-19T04:57:12,780 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:57:12,780 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 561a52b828b94f12e63c17503cb73505 2024-11-19T04:57:12,780 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 561a52b828b94f12e63c17503cb73505 2024-11-19T04:57:12,781 INFO [StoreOpener-561a52b828b94f12e63c17503cb73505-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 561a52b828b94f12e63c17503cb73505 2024-11-19T04:57:12,783 INFO [StoreOpener-561a52b828b94f12e63c17503cb73505-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 561a52b828b94f12e63c17503cb73505 columnFamilyName info 2024-11-19T04:57:12,783 DEBUG [StoreOpener-561a52b828b94f12e63c17503cb73505-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:57:12,783 INFO [StoreOpener-561a52b828b94f12e63c17503cb73505-1 {}] regionserver.HStore(327): Store=561a52b828b94f12e63c17503cb73505/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T04:57:12,783 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 561a52b828b94f12e63c17503cb73505 2024-11-19T04:57:12,784 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505 2024-11-19T04:57:12,784 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505 2024-11-19T04:57:12,785 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 561a52b828b94f12e63c17503cb73505 2024-11-19T04:57:12,785 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 561a52b828b94f12e63c17503cb73505 2024-11-19T04:57:12,786 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 561a52b828b94f12e63c17503cb73505 2024-11-19T04:57:12,788 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T04:57:12,789 INFO [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 561a52b828b94f12e63c17503cb73505; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=738114, jitterRate=-0.06143985688686371}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T04:57:12,789 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 561a52b828b94f12e63c17503cb73505 2024-11-19T04:57:12,790 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 561a52b828b94f12e63c17503cb73505: Running coprocessor pre-open hook at 1731992232780Writing region info on filesystem at 1731992232780Initializing all the Stores at 1731992232781 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992232781Cleaning up temporary data from old regions at 1731992232785 (+4 ms)Running coprocessor post-open hooks at 1731992232789 (+4 ms)Region opened successfully at 1731992232790 (+1 ms) 2024-11-19T04:57:12,791 INFO [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505., pid=6, masterSystemTime=1731992232776 2024-11-19T04:57:12,793 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505. 
2024-11-19T04:57:12,793 INFO [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505. 2024-11-19T04:57:12,794 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=561a52b828b94f12e63c17503cb73505, regionState=OPEN, openSeqNum=2, regionLocation=08a7f35e60d4,42609,1731992231509 2024-11-19T04:57:12,796 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 561a52b828b94f12e63c17503cb73505, server=08a7f35e60d4,42609,1731992231509 because future has completed 2024-11-19T04:57:12,800 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-19T04:57:12,800 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 561a52b828b94f12e63c17503cb73505, server=08a7f35e60d4,42609,1731992231509 in 175 msec 2024-11-19T04:57:12,802 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-19T04:57:12,802 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=561a52b828b94f12e63c17503cb73505, ASSIGN in 334 msec 2024-11-19T04:57:12,803 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T04:57:12,803 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731992232803"}]},"ts":"1731992232803"} 2024-11-19T04:57:12,806 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-19T04:57:12,807 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T04:57:12,809 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 368 msec 2024-11-19T04:57:13,169 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:13,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:14,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:14,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:14,983 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:14,983 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:14,984 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:14,984 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:14,984 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:14,984 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:15,004 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:15,004 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:15,004 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:15,004 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:15,004 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:15,005 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:15,008 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:15,008 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:15,008 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:15,011 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:15,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:15,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:15,516 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T04:57:15,517 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:15,517 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:15,518 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:15,518 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:15,518 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:15,518 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:15,540 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:15,540 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:15,541 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:15,541 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:15,541 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:15,542 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:15,545 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:15,545 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:15,546 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:15,548 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:16,171 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:16,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:17,146 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-19T04:57:17,146 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-19T04:57:17,147 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T04:57:17,171 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:57:17,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:17,765 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-19T04:57:17,766 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-19T04:57:18,172 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:18,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:19,173 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:19,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:20,173 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:20,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:21,174 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:21,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:22,174 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:22,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:22,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35671 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T04:57:22,469 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-19T04:57:22,469 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-19T04:57:22,472 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-19T04:57:22,472 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505. 
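Editor's note on the repeated Close-WAL-Writer-0 warnings above: the stack traces show RecoverLeaseFSUtils.isFileClosed being reached through GeneratedMethodAccessor/Method.invoke, i.e. a reflective call into DistributedFileSystem.isFileClosed. Once the DFS client behind the old WAL directory is already shut down, the underlying "java.io.IOException: Filesystem closed" therefore surfaces wrapped in a java.lang.reflect.InvocationTargetException whose own message is null, which is exactly how the log prints it. A minimal, self-contained Java sketch (not HBase's code; the ClosedFs class and the path string are invented for illustration) of that wrapping:

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Stand-in for a filesystem whose method throws once the client is closed,
// the way DistributedFileSystem.isFileClosed does in the traces above.
class ClosedFs {
    public boolean isFileClosed(String path) throws IOException {
        throw new IOException("Filesystem closed");
    }
}

public class ReflectiveCallDemo {
    public static void main(String[] args) throws Exception {
        Method m = ClosedFs.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(new ClosedFs(), "/some/wal");
        } catch (InvocationTargetException e) {
            // The reflective layer reports the wrapper, which carries no message of its
            // own (logged above as "InvocationTargetException: null"); the real failure
            // is attached as the cause.
            System.out.println("wrapper: " + e);
            System.out.println("cause  : " + e.getCause()); // java.io.IOException: Filesystem closed
        }
    }
}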
2024-11-19T04:57:22,475 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505., hostname=08a7f35e60d4,42609,1731992231509, seqNum=2] 2024-11-19T04:57:22,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42609 {}] regionserver.HRegion(8855): Flush requested on 561a52b828b94f12e63c17503cb73505 2024-11-19T04:57:22,488 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 561a52b828b94f12e63c17503cb73505 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T04:57:22,506 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/.tmp/info/ec9a41cf56504961804a105169212d63 is 1080, key is row0001/info:/1731992242476/Put/seqid=0 2024-11-19T04:57:22,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741837_1013 (size=12509) 2024-11-19T04:57:22,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741837_1013 (size=12509) 2024-11-19T04:57:22,513 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/.tmp/info/ec9a41cf56504961804a105169212d63 2024-11-19T04:57:22,520 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/.tmp/info/ec9a41cf56504961804a105169212d63 as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/ec9a41cf56504961804a105169212d63 2024-11-19T04:57:22,526 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/ec9a41cf56504961804a105169212d63, entries=7, sequenceid=11, filesize=12.2 K 2024-11-19T04:57:22,527 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=22.07 KB/22596 for 561a52b828b94f12e63c17503cb73505 in 39ms, sequenceid=11, compaction requested=false 2024-11-19T04:57:22,527 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 561a52b828b94f12e63c17503cb73505: 2024-11-19T04:57:22,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42609 {}] regionserver.HRegion(8855): Flush requested on 561a52b828b94f12e63c17503cb73505 2024-11-19T04:57:22,528 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 561a52b828b94f12e63c17503cb73505 1/1 column families, dataSize=23.12 KB heapSize=25 KB 2024-11-19T04:57:22,533 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/.tmp/info/6bb6f8dabc93439aac717698d86d5655 is 1080, key is row0008/info:/1731992242489/Put/seqid=0 2024-11-19T04:57:22,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741838_1014 (size=28684) 2024-11-19T04:57:22,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741838_1014 (size=28684) 2024-11-19T04:57:22,544 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=23.12 KB at sequenceid=36 (bloomFilter=true), to=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/.tmp/info/6bb6f8dabc93439aac717698d86d5655 2024-11-19T04:57:22,550 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/.tmp/info/6bb6f8dabc93439aac717698d86d5655 as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/6bb6f8dabc93439aac717698d86d5655 2024-11-19T04:57:22,555 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/6bb6f8dabc93439aac717698d86d5655, entries=22, sequenceid=36, filesize=28.0 K 2024-11-19T04:57:22,556 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.12 KB/23672, heapSize ~24.98 KB/25584, currentSize=3.15 KB/3228 for 561a52b828b94f12e63c17503cb73505 in 28ms, sequenceid=36, compaction requested=false 2024-11-19T04:57:22,557 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 561a52b828b94f12e63c17503cb73505: 2024-11-19T04:57:22,557 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=40.2 K, sizeToCheck=16.0 K 2024-11-19T04:57:22,557 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T04:57:22,557 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/6bb6f8dabc93439aac717698d86d5655 because midkey is the same as first or last row 2024-11-19T04:57:22,649 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T04:57:22,650 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:22,651 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:22,651 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:22,651 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:22,651 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:22,651 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:22,672 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:22,672 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:22,673 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:22,673 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:22,673 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:22,674 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:22,677 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:22,677 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:22,678 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:22,680 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:23,175 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:23,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:24,175 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:24,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:24,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42609 {}] regionserver.HRegion(8855): Flush requested on 561a52b828b94f12e63c17503cb73505 2024-11-19T04:57:24,543 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 561a52b828b94f12e63c17503cb73505 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T04:57:24,548 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/.tmp/info/95b695d053fe4be2b24935fc7e57ac64 is 1080, key is row0030/info:/1731992242530/Put/seqid=0 2024-11-19T04:57:24,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741839_1015 (size=12509) 2024-11-19T04:57:24,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741839_1015 (size=12509) 2024-11-19T04:57:24,554 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=46 (bloomFilter=true), to=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/.tmp/info/95b695d053fe4be2b24935fc7e57ac64 2024-11-19T04:57:24,561 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/.tmp/info/95b695d053fe4be2b24935fc7e57ac64 as 
hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/95b695d053fe4be2b24935fc7e57ac64 2024-11-19T04:57:24,567 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/95b695d053fe4be2b24935fc7e57ac64, entries=7, sequenceid=46, filesize=12.2 K 2024-11-19T04:57:24,568 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for 561a52b828b94f12e63c17503cb73505 in 25ms, sequenceid=46, compaction requested=true 2024-11-19T04:57:24,568 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 561a52b828b94f12e63c17503cb73505: 2024-11-19T04:57:24,568 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=52.4 K, sizeToCheck=16.0 K 2024-11-19T04:57:24,568 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T04:57:24,568 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/6bb6f8dabc93439aac717698d86d5655 because midkey is the same as first or last row 2024-11-19T04:57:24,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 561a52b828b94f12e63c17503cb73505:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T04:57:24,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T04:57:24,568 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T04:57:24,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42609 {}] regionserver.HRegion(8855): Flush requested on 561a52b828b94f12e63c17503cb73505 2024-11-19T04:57:24,569 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 561a52b828b94f12e63c17503cb73505 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-19T04:57:24,570 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 53702 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T04:57:24,570 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HStore(1541): 561a52b828b94f12e63c17503cb73505/info is initiating minor compaction (all files) 2024-11-19T04:57:24,570 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 561a52b828b94f12e63c17503cb73505/info in TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505. 
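Editor's note on the flush/split/compaction decisions logged above: after each flush the split policy compares the store's total size against its threshold (sumSize=52.4 K vs sizeToCheck=16.0 K), but StoreUtils then refuses to produce a split point because the chosen file's midkey equals its first or last row, so only a minor compaction gets queued. A rough sketch of those two checks, using a simplified stand-in type (StoreFileInfo) and illustrative row values rather than HBase's actual classes:

import java.util.Arrays;
import java.util.List;

public class SplitDecisionSketch {

    // Simplified stand-in for a store file's size and key range.
    static class StoreFileInfo {
        final long sizeBytes;
        final byte[] firstRow, midRow, lastRow;
        StoreFileInfo(long sizeBytes, byte[] firstRow, byte[] midRow, byte[] lastRow) {
            this.sizeBytes = sizeBytes;
            this.firstRow = firstRow;
            this.midRow = midRow;
            this.lastRow = lastRow;
        }
    }

    // Size check: request a split once the summed store size exceeds the threshold,
    // e.g. 52.4 K > 16.0 K in the log above.
    static boolean shouldSplit(List<StoreFileInfo> files, long sizeToCheck) {
        long sumSize = files.stream().mapToLong(f -> f.sizeBytes).sum();
        return sumSize > sizeToCheck;
    }

    // Midkey guard: a split point equal to the first or last row is unusable,
    // which is the "cannot split ... because midkey is the same as first or last row" case.
    static byte[] splitPoint(StoreFileInfo largest) {
        if (Arrays.equals(largest.midRow, largest.firstRow)
                || Arrays.equals(largest.midRow, largest.lastRow)) {
            return null;
        }
        return largest.midRow;
    }

    public static void main(String[] args) {
        // Illustrative values only: a ~28 K file whose midkey happens to equal its first row.
        StoreFileInfo f = new StoreFileInfo(28_684,
                "row0008".getBytes(), "row0008".getBytes(), "row0029".getBytes());
        System.out.println(shouldSplit(List.of(f), 16 * 1024)); // true: size says split
        System.out.println(splitPoint(f));                      // null: midkey guard blocks it
    }
}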
2024-11-19T04:57:24,570 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/ec9a41cf56504961804a105169212d63, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/6bb6f8dabc93439aac717698d86d5655, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/95b695d053fe4be2b24935fc7e57ac64] into tmpdir=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/.tmp, totalSize=52.4 K 2024-11-19T04:57:24,570 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.Compactor(225): Compacting ec9a41cf56504961804a105169212d63, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731992242476 2024-11-19T04:57:24,571 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6bb6f8dabc93439aac717698d86d5655, keycount=22, bloomtype=ROW, size=28.0 K, encoding=NONE, compression=NONE, seqNum=36, earliestPutTs=1731992242489 2024-11-19T04:57:24,571 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 95b695d053fe4be2b24935fc7e57ac64, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=46, earliestPutTs=1731992242530 2024-11-19T04:57:24,574 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/.tmp/info/bd463863716442b98c948521d88e22fb is 1080, key is row0037/info:/1731992244544/Put/seqid=0 2024-11-19T04:57:24,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741840_1016 (size=18987) 2024-11-19T04:57:24,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741840_1016 (size=18987) 2024-11-19T04:57:24,581 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=62 (bloomFilter=true), to=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/.tmp/info/bd463863716442b98c948521d88e22fb 2024-11-19T04:57:24,588 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/.tmp/info/bd463863716442b98c948521d88e22fb as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/bd463863716442b98c948521d88e22fb 2024-11-19T04:57:24,591 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 561a52b828b94f12e63c17503cb73505#info#compaction#57 average throughput is 9.24 MB/second, slept 0 time(s) and total slept time is 0 
ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T04:57:24,591 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/.tmp/info/eb095b4838c94bc8bd7526dfeca130f3 is 1080, key is row0001/info:/1731992242476/Put/seqid=0 2024-11-19T04:57:24,597 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/bd463863716442b98c948521d88e22fb, entries=13, sequenceid=62, filesize=18.5 K 2024-11-19T04:57:24,598 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=12.61 KB/12912 for 561a52b828b94f12e63c17503cb73505 in 29ms, sequenceid=62, compaction requested=false 2024-11-19T04:57:24,598 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 561a52b828b94f12e63c17503cb73505: 2024-11-19T04:57:24,599 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.0 K, sizeToCheck=16.0 K 2024-11-19T04:57:24,599 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T04:57:24,599 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/6bb6f8dabc93439aac717698d86d5655 because midkey is the same as first or last row 2024-11-19T04:57:24,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42609 {}] regionserver.HRegion(8855): Flush requested on 561a52b828b94f12e63c17503cb73505 2024-11-19T04:57:24,600 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 561a52b828b94f12e63c17503cb73505 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-19T04:57:24,607 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/.tmp/info/9bbe6802558141e49ebf357a56e4c6cc is 1080, key is row0050/info:/1731992244570/Put/seqid=0 2024-11-19T04:57:24,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741841_1017 (size=43901) 2024-11-19T04:57:24,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741841_1017 (size=43901) 2024-11-19T04:57:24,639 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/.tmp/info/eb095b4838c94bc8bd7526dfeca130f3 as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/eb095b4838c94bc8bd7526dfeca130f3 2024-11-19T04:57:24,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741842_1018 (size=20064) 2024-11-19T04:57:24,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741842_1018 (size=20064) 2024-11-19T04:57:24,641 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/.tmp/info/9bbe6802558141e49ebf357a56e4c6cc 2024-11-19T04:57:24,647 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/.tmp/info/9bbe6802558141e49ebf357a56e4c6cc as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/9bbe6802558141e49ebf357a56e4c6cc 2024-11-19T04:57:24,647 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 561a52b828b94f12e63c17503cb73505/info of 561a52b828b94f12e63c17503cb73505 into eb095b4838c94bc8bd7526dfeca130f3(size=42.9 K), total size for store is 61.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T04:57:24,647 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 561a52b828b94f12e63c17503cb73505: 2024-11-19T04:57:24,647 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505., storeName=561a52b828b94f12e63c17503cb73505/info, priority=13, startTime=1731992244568; duration=0sec 2024-11-19T04:57:24,647 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=61.4 K, sizeToCheck=16.0 K 2024-11-19T04:57:24,647 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T04:57:24,648 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/eb095b4838c94bc8bd7526dfeca130f3 because midkey is the same as first or last row 2024-11-19T04:57:24,648 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=61.4 K, sizeToCheck=16.0 K 2024-11-19T04:57:24,648 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T04:57:24,648 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/eb095b4838c94bc8bd7526dfeca130f3 because midkey is the same as first or last row 2024-11-19T04:57:24,648 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 
{}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=61.4 K, sizeToCheck=16.0 K 2024-11-19T04:57:24,648 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T04:57:24,648 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/eb095b4838c94bc8bd7526dfeca130f3 because midkey is the same as first or last row 2024-11-19T04:57:24,648 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T04:57:24,648 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 561a52b828b94f12e63c17503cb73505:info 2024-11-19T04:57:24,653 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/9bbe6802558141e49ebf357a56e4c6cc, entries=14, sequenceid=79, filesize=19.6 K 2024-11-19T04:57:24,654 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=1.05 KB/1076 for 561a52b828b94f12e63c17503cb73505 in 54ms, sequenceid=79, compaction requested=true 2024-11-19T04:57:24,654 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 561a52b828b94f12e63c17503cb73505: 2024-11-19T04:57:24,654 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=81.0 K, sizeToCheck=16.0 K 2024-11-19T04:57:24,654 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T04:57:24,654 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/eb095b4838c94bc8bd7526dfeca130f3 because midkey is the same as first or last row 2024-11-19T04:57:24,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 561a52b828b94f12e63c17503cb73505:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T04:57:24,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T04:57:24,655 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T04:57:24,656 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 82952 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T04:57:24,656 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HStore(1541): 561a52b828b94f12e63c17503cb73505/info is initiating minor compaction (all files) 
2024-11-19T04:57:24,656 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 561a52b828b94f12e63c17503cb73505/info in TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505. 2024-11-19T04:57:24,656 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/eb095b4838c94bc8bd7526dfeca130f3, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/bd463863716442b98c948521d88e22fb, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/9bbe6802558141e49ebf357a56e4c6cc] into tmpdir=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/.tmp, totalSize=81.0 K 2024-11-19T04:57:24,657 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.Compactor(225): Compacting eb095b4838c94bc8bd7526dfeca130f3, keycount=36, bloomtype=ROW, size=42.9 K, encoding=NONE, compression=NONE, seqNum=46, earliestPutTs=1731992242476 2024-11-19T04:57:24,657 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.Compactor(225): Compacting bd463863716442b98c948521d88e22fb, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=62, earliestPutTs=1731992244544 2024-11-19T04:57:24,657 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9bbe6802558141e49ebf357a56e4c6cc, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1731992244570 2024-11-19T04:57:24,670 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 561a52b828b94f12e63c17503cb73505#info#compaction#59 average throughput is 32.32 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T04:57:24,671 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/.tmp/info/cd5fb7f44eae4692af0a680415b1cc0c is 1080, key is row0001/info:/1731992242476/Put/seqid=0 2024-11-19T04:57:24,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741843_1019 (size=73224) 2024-11-19T04:57:24,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741843_1019 (size=73224) 2024-11-19T04:57:24,684 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/.tmp/info/cd5fb7f44eae4692af0a680415b1cc0c as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/cd5fb7f44eae4692af0a680415b1cc0c 2024-11-19T04:57:24,694 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 561a52b828b94f12e63c17503cb73505/info of 561a52b828b94f12e63c17503cb73505 into cd5fb7f44eae4692af0a680415b1cc0c(size=71.5 K), total size for store is 71.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T04:57:24,694 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 561a52b828b94f12e63c17503cb73505: 2024-11-19T04:57:24,694 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505., storeName=561a52b828b94f12e63c17503cb73505/info, priority=13, startTime=1731992244654; duration=0sec 2024-11-19T04:57:24,694 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.5 K, sizeToCheck=16.0 K 2024-11-19T04:57:24,694 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T04:57:24,695 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.5 K, sizeToCheck=16.0 K 2024-11-19T04:57:24,695 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T04:57:24,695 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.5 K, sizeToCheck=16.0 K 2024-11-19T04:57:24,695 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T04:57:24,696 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505., 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T04:57:24,696 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T04:57:24,696 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 561a52b828b94f12e63c17503cb73505:info 2024-11-19T04:57:24,697 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35671 {}] assignment.AssignmentManager(1363): Split request from 08a7f35e60d4,42609,1731992231509, parent={ENCODED => 561a52b828b94f12e63c17503cb73505, NAME => 'TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-19T04:57:24,704 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35671 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=08a7f35e60d4,42609,1731992231509 2024-11-19T04:57:24,708 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35671 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=561a52b828b94f12e63c17503cb73505, daughterA=4979a9be9a43e82c8b5cdff164028294, daughterB=e3da184731fb44e76a4ad228b993df2a 2024-11-19T04:57:24,710 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=561a52b828b94f12e63c17503cb73505, daughterA=4979a9be9a43e82c8b5cdff164028294, daughterB=e3da184731fb44e76a4ad228b993df2a 2024-11-19T04:57:24,710 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=561a52b828b94f12e63c17503cb73505, daughterA=4979a9be9a43e82c8b5cdff164028294, daughterB=e3da184731fb44e76a4ad228b993df2a 2024-11-19T04:57:24,710 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=561a52b828b94f12e63c17503cb73505, daughterA=4979a9be9a43e82c8b5cdff164028294, daughterB=e3da184731fb44e76a4ad228b993df2a 2024-11-19T04:57:24,718 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=561a52b828b94f12e63c17503cb73505, UNASSIGN}] 2024-11-19T04:57:24,719 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=561a52b828b94f12e63c17503cb73505, UNASSIGN 2024-11-19T04:57:24,721 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=561a52b828b94f12e63c17503cb73505, regionState=CLOSING, regionLocation=08a7f35e60d4,42609,1731992231509 2024-11-19T04:57:24,724 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, 
hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=561a52b828b94f12e63c17503cb73505, UNASSIGN because future has completed 2024-11-19T04:57:24,724 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-19T04:57:24,725 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 561a52b828b94f12e63c17503cb73505, server=08a7f35e60d4,42609,1731992231509}] 2024-11-19T04:57:24,883 INFO [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 561a52b828b94f12e63c17503cb73505 2024-11-19T04:57:24,883 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-19T04:57:24,884 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 561a52b828b94f12e63c17503cb73505, disabling compactions & flushes 2024-11-19T04:57:24,884 INFO [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505. 2024-11-19T04:57:24,884 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505. 2024-11-19T04:57:24,884 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505. after waiting 0 ms 2024-11-19T04:57:24,884 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505. 
2024-11-19T04:57:24,884 INFO [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 561a52b828b94f12e63c17503cb73505 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-19T04:57:24,889 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/.tmp/info/95857016e643434c840cfd7d2d99d28a is 1080, key is row0064/info:/1731992244601/Put/seqid=0 2024-11-19T04:57:24,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741844_1020 (size=6033) 2024-11-19T04:57:24,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741844_1020 (size=6033) 2024-11-19T04:57:24,895 INFO [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=85 (bloomFilter=true), to=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/.tmp/info/95857016e643434c840cfd7d2d99d28a 2024-11-19T04:57:24,904 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/.tmp/info/95857016e643434c840cfd7d2d99d28a as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/95857016e643434c840cfd7d2d99d28a 2024-11-19T04:57:24,913 INFO [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/95857016e643434c840cfd7d2d99d28a, entries=1, sequenceid=85, filesize=5.9 K 2024-11-19T04:57:24,914 INFO [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 561a52b828b94f12e63c17503cb73505 in 30ms, sequenceid=85, compaction requested=false 2024-11-19T04:57:24,915 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/ec9a41cf56504961804a105169212d63, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/6bb6f8dabc93439aac717698d86d5655, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/eb095b4838c94bc8bd7526dfeca130f3, 
hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/95b695d053fe4be2b24935fc7e57ac64, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/bd463863716442b98c948521d88e22fb, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/9bbe6802558141e49ebf357a56e4c6cc] to archive 2024-11-19T04:57:24,916 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-19T04:57:24,918 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/ec9a41cf56504961804a105169212d63 to hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/archive/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/ec9a41cf56504961804a105169212d63 2024-11-19T04:57:24,919 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/6bb6f8dabc93439aac717698d86d5655 to hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/archive/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/6bb6f8dabc93439aac717698d86d5655 2024-11-19T04:57:24,921 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/eb095b4838c94bc8bd7526dfeca130f3 to hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/archive/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/eb095b4838c94bc8bd7526dfeca130f3 2024-11-19T04:57:24,922 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/95b695d053fe4be2b24935fc7e57ac64 to hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/archive/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/95b695d053fe4be2b24935fc7e57ac64 2024-11-19T04:57:24,924 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/bd463863716442b98c948521d88e22fb to 
hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/archive/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/bd463863716442b98c948521d88e22fb 2024-11-19T04:57:24,925 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/9bbe6802558141e49ebf357a56e4c6cc to hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/archive/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/9bbe6802558141e49ebf357a56e4c6cc 2024-11-19T04:57:24,932 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/recovered.edits/88.seqid, newMaxSeqId=88, maxSeqId=1 2024-11-19T04:57:24,933 INFO [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505. 2024-11-19T04:57:24,933 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 561a52b828b94f12e63c17503cb73505: Waiting for close lock at 1731992244884Running coprocessor pre-close hooks at 1731992244884Disabling compacts and flushes for region at 1731992244884Disabling writes for close at 1731992244884Obtaining lock to block concurrent updates at 1731992244884Preparing flush snapshotting stores in 561a52b828b94f12e63c17503cb73505 at 1731992244884Finished memstore snapshotting TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731992244884Flushing stores of TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505. 
at 1731992244885 (+1 ms)Flushing 561a52b828b94f12e63c17503cb73505/info: creating writer at 1731992244885Flushing 561a52b828b94f12e63c17503cb73505/info: appending metadata at 1731992244888 (+3 ms)Flushing 561a52b828b94f12e63c17503cb73505/info: closing flushed file at 1731992244888Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@296c784c: reopening flushed file at 1731992244903 (+15 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 561a52b828b94f12e63c17503cb73505 in 30ms, sequenceid=85, compaction requested=false at 1731992244914 (+11 ms)Writing region close event to WAL at 1731992244928 (+14 ms)Running coprocessor post-close hooks at 1731992244933 (+5 ms)Closed at 1731992244933 2024-11-19T04:57:24,935 INFO [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 561a52b828b94f12e63c17503cb73505 2024-11-19T04:57:24,936 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=561a52b828b94f12e63c17503cb73505, regionState=CLOSED 2024-11-19T04:57:24,939 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 561a52b828b94f12e63c17503cb73505, server=08a7f35e60d4,42609,1731992231509 because future has completed 2024-11-19T04:57:24,943 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-19T04:57:24,943 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 561a52b828b94f12e63c17503cb73505, server=08a7f35e60d4,42609,1731992231509 in 215 msec 2024-11-19T04:57:24,945 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-19T04:57:24,945 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=561a52b828b94f12e63c17503cb73505, UNASSIGN in 225 msec 2024-11-19T04:57:24,955 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:57:24,959 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 2 storefiles, region=561a52b828b94f12e63c17503cb73505, threads=2 2024-11-19T04:57:24,961 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/95857016e643434c840cfd7d2d99d28a for region: 561a52b828b94f12e63c17503cb73505 2024-11-19T04:57:24,961 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/cd5fb7f44eae4692af0a680415b1cc0c for region: 561a52b828b94f12e63c17503cb73505 2024-11-19T04:57:24,972 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for 
hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/95857016e643434c840cfd7d2d99d28a, top=true 2024-11-19T04:57:24,981 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/TestLogRolling-testLogRolling=561a52b828b94f12e63c17503cb73505-95857016e643434c840cfd7d2d99d28a for child: e3da184731fb44e76a4ad228b993df2a, parent: 561a52b828b94f12e63c17503cb73505 2024-11-19T04:57:24,981 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/95857016e643434c840cfd7d2d99d28a for region: 561a52b828b94f12e63c17503cb73505 2024-11-19T04:57:24,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741845_1021 (size=27) 2024-11-19T04:57:24,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741845_1021 (size=27) 2024-11-19T04:57:24,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741846_1022 (size=27) 2024-11-19T04:57:24,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741846_1022 (size=27) 2024-11-19T04:57:24,994 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/cd5fb7f44eae4692af0a680415b1cc0c for region: 561a52b828b94f12e63c17503cb73505 2024-11-19T04:57:24,997 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 561a52b828b94f12e63c17503cb73505 Daughter A: [hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/4979a9be9a43e82c8b5cdff164028294/info/cd5fb7f44eae4692af0a680415b1cc0c.561a52b828b94f12e63c17503cb73505] storefiles, Daughter B: [hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/TestLogRolling-testLogRolling=561a52b828b94f12e63c17503cb73505-95857016e643434c840cfd7d2d99d28a, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/cd5fb7f44eae4692af0a680415b1cc0c.561a52b828b94f12e63c17503cb73505] storefiles. 
2024-11-19T04:57:25,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741847_1023 (size=71) 2024-11-19T04:57:25,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741847_1023 (size=71) 2024-11-19T04:57:25,007 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:57:25,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741848_1024 (size=71) 2024-11-19T04:57:25,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741848_1024 (size=71) 2024-11-19T04:57:25,021 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:57:25,031 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/4979a9be9a43e82c8b5cdff164028294/recovered.edits/88.seqid, newMaxSeqId=88, maxSeqId=-1 2024-11-19T04:57:25,034 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/recovered.edits/88.seqid, newMaxSeqId=88, maxSeqId=-1 2024-11-19T04:57:25,037 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731992245037"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731992245037"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731992245037"}]},"ts":"1731992245037"} 2024-11-19T04:57:25,037 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731992244704.4979a9be9a43e82c8b5cdff164028294.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731992245037"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731992245037"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731992245037"}]},"ts":"1731992245037"} 2024-11-19T04:57:25,037 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731992245037"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731992245037"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731992245037"}]},"ts":"1731992245037"} 2024-11-19T04:57:25,057 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4979a9be9a43e82c8b5cdff164028294, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=e3da184731fb44e76a4ad228b993df2a, ASSIGN}] 2024-11-19T04:57:25,058 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4979a9be9a43e82c8b5cdff164028294, ASSIGN 2024-11-19T04:57:25,059 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e3da184731fb44e76a4ad228b993df2a, ASSIGN 2024-11-19T04:57:25,059 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4979a9be9a43e82c8b5cdff164028294, ASSIGN; state=SPLITTING_NEW, location=08a7f35e60d4,42609,1731992231509; forceNewPlan=false, retain=false 2024-11-19T04:57:25,059 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e3da184731fb44e76a4ad228b993df2a, ASSIGN; state=SPLITTING_NEW, location=08a7f35e60d4,42609,1731992231509; forceNewPlan=false, retain=false 2024-11-19T04:57:25,176 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:25,210 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=e3da184731fb44e76a4ad228b993df2a, regionState=OPENING, regionLocation=08a7f35e60d4,42609,1731992231509 2024-11-19T04:57:25,210 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=4979a9be9a43e82c8b5cdff164028294, regionState=OPENING, regionLocation=08a7f35e60d4,42609,1731992231509 2024-11-19T04:57:25,213 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e3da184731fb44e76a4ad228b993df2a, ASSIGN because future has completed 2024-11-19T04:57:25,213 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure e3da184731fb44e76a4ad228b993df2a, server=08a7f35e60d4,42609,1731992231509}] 2024-11-19T04:57:25,214 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4979a9be9a43e82c8b5cdff164028294, ASSIGN because future has completed 2024-11-19T04:57:25,215 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4979a9be9a43e82c8b5cdff164028294, server=08a7f35e60d4,42609,1731992231509}] 2024-11-19T04:57:25,368 INFO [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a. 
2024-11-19T04:57:25,369 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => e3da184731fb44e76a4ad228b993df2a, NAME => 'TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-19T04:57:25,369 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling e3da184731fb44e76a4ad228b993df2a 2024-11-19T04:57:25,369 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:57:25,369 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for e3da184731fb44e76a4ad228b993df2a 2024-11-19T04:57:25,369 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for e3da184731fb44e76a4ad228b993df2a 2024-11-19T04:57:25,371 INFO [StoreOpener-e3da184731fb44e76a4ad228b993df2a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region e3da184731fb44e76a4ad228b993df2a 2024-11-19T04:57:25,371 INFO [StoreOpener-e3da184731fb44e76a4ad228b993df2a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e3da184731fb44e76a4ad228b993df2a columnFamilyName info 2024-11-19T04:57:25,371 DEBUG [StoreOpener-e3da184731fb44e76a4ad228b993df2a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:57:25,380 DEBUG [StoreOpener-e3da184731fb44e76a4ad228b993df2a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/TestLogRolling-testLogRolling=561a52b828b94f12e63c17503cb73505-95857016e643434c840cfd7d2d99d28a 2024-11-19T04:57:25,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:57:25,386 DEBUG [StoreOpener-e3da184731fb44e76a4ad228b993df2a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/cd5fb7f44eae4692af0a680415b1cc0c.561a52b828b94f12e63c17503cb73505->hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/cd5fb7f44eae4692af0a680415b1cc0c-top 2024-11-19T04:57:25,387 INFO [StoreOpener-e3da184731fb44e76a4ad228b993df2a-1 {}] regionserver.HStore(327): Store=e3da184731fb44e76a4ad228b993df2a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T04:57:25,387 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for e3da184731fb44e76a4ad228b993df2a 2024-11-19T04:57:25,388 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a 2024-11-19T04:57:25,389 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a 2024-11-19T04:57:25,390 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for e3da184731fb44e76a4ad228b993df2a 2024-11-19T04:57:25,390 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for e3da184731fb44e76a4ad228b993df2a 2024-11-19T04:57:25,391 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for e3da184731fb44e76a4ad228b993df2a 2024-11-19T04:57:25,392 INFO [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened e3da184731fb44e76a4ad228b993df2a; next sequenceid=89; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=865113, jitterRate=0.10004894435405731}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T04:57:25,392 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e3da184731fb44e76a4ad228b993df2a 2024-11-19T04:57:25,393 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for e3da184731fb44e76a4ad228b993df2a: Running coprocessor pre-open hook at 1731992245369Writing region info on filesystem at 1731992245369Initializing all the Stores at 1731992245370 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', 
BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992245370Cleaning up temporary data from old regions at 1731992245390 (+20 ms)Running coprocessor post-open hooks at 1731992245392 (+2 ms)Region opened successfully at 1731992245393 (+1 ms) 2024-11-19T04:57:25,394 INFO [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a., pid=12, masterSystemTime=1731992245365 2024-11-19T04:57:25,394 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store e3da184731fb44e76a4ad228b993df2a:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T04:57:25,394 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T04:57:25,394 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-19T04:57:25,395 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a. 2024-11-19T04:57:25,395 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HStore(1541): e3da184731fb44e76a4ad228b993df2a/info is initiating minor compaction (all files) 2024-11-19T04:57:25,395 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e3da184731fb44e76a4ad228b993df2a/info in TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a. 
2024-11-19T04:57:25,396 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/cd5fb7f44eae4692af0a680415b1cc0c.561a52b828b94f12e63c17503cb73505->hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/cd5fb7f44eae4692af0a680415b1cc0c-top, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/TestLogRolling-testLogRolling=561a52b828b94f12e63c17503cb73505-95857016e643434c840cfd7d2d99d28a] into tmpdir=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp, totalSize=77.4 K 2024-11-19T04:57:25,396 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.Compactor(225): Compacting cd5fb7f44eae4692af0a680415b1cc0c.561a52b828b94f12e63c17503cb73505, keycount=31, bloomtype=ROW, size=71.5 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1731992242476 2024-11-19T04:57:25,397 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a. 2024-11-19T04:57:25,397 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=561a52b828b94f12e63c17503cb73505-95857016e643434c840cfd7d2d99d28a, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1731992244601 2024-11-19T04:57:25,397 INFO [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a. 2024-11-19T04:57:25,397 INFO [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731992244704.4979a9be9a43e82c8b5cdff164028294. 
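Note: the entries above show daughter region e3da184731fb44e76a4ad228b993df2a opening with a reference store file named <parentHFile>.<parentEncodedRegionName> that resolves to the "-top" half of the parent's HFile, and a system compaction being queued right away to rewrite that reference into a real file. A minimal sketch of inspecting such a store directory with the plain HDFS client follows; the class name is invented, and the NameNode address and path are copied from the log purely for illustration.

    // Sketch only: list a daughter region's 'info' store directory to see the
    // parent-reference file before the post-split compaction rewrites it.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListDaughterStoreFiles {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:40207"); // NameNode from the log; adjust as needed
        FileSystem fs = FileSystem.get(conf);
        // Daughter region's 'info' store directory (path taken from the log)
        Path store = new Path("/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/"
            + "data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info");
        for (FileStatus f : fs.listStatus(store)) {
          // A reference file is named <parentHFile>.<parentEncodedRegionName>; after the
          // compaction completes it is replaced by an ordinary HFile.
          System.out.println(f.getPath().getName() + "  " + f.getLen() + " bytes");
        }
      }
    }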
2024-11-19T04:57:25,397 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 4979a9be9a43e82c8b5cdff164028294, NAME => 'TestLogRolling-testLogRolling,,1731992244704.4979a9be9a43e82c8b5cdff164028294.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-19T04:57:25,397 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 4979a9be9a43e82c8b5cdff164028294 2024-11-19T04:57:25,397 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731992244704.4979a9be9a43e82c8b5cdff164028294.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:57:25,397 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 4979a9be9a43e82c8b5cdff164028294 2024-11-19T04:57:25,397 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 4979a9be9a43e82c8b5cdff164028294 2024-11-19T04:57:25,398 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=e3da184731fb44e76a4ad228b993df2a, regionState=OPEN, openSeqNum=89, regionLocation=08a7f35e60d4,42609,1731992231509 2024-11-19T04:57:25,399 INFO [StoreOpener-4979a9be9a43e82c8b5cdff164028294-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 4979a9be9a43e82c8b5cdff164028294 2024-11-19T04:57:25,400 INFO [StoreOpener-4979a9be9a43e82c8b5cdff164028294-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4979a9be9a43e82c8b5cdff164028294 columnFamilyName info 2024-11-19T04:57:25,400 DEBUG [StoreOpener-4979a9be9a43e82c8b5cdff164028294-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:57:25,400 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42609 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-19T04:57:25,400 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
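Note: just above, a flush is requested on the meta region (1588230740) and FlushAllLargeStoresPolicy decides to flush all column families. For comparison, a hedged sketch of requesting a flush through the client Admin API; this is not the mechanism used here (the server-side MemStoreFlusher does it), and the class name and default connection configuration are assumptions.

    // Sketch only: explicit flush requests via the HBase Admin API.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.flush(TableName.META_TABLE_NAME);                           // flush all stores of hbase:meta
          admin.flush(TableName.valueOf("TestLogRolling-testLogRolling"));  // or a user table
        }
      }
    }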
2024-11-19T04:57:25,401 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-11-19T04:57:25,401 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure e3da184731fb44e76a4ad228b993df2a, server=08a7f35e60d4,42609,1731992231509 because future has completed 2024-11-19T04:57:25,403 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35671 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=08a7f35e60d4,42609,1731992231509, table=TestLogRolling-testLogRolling, region=e3da184731fb44e76a4ad228b993df2a. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-19T04:57:25,406 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-19T04:57:25,406 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure e3da184731fb44e76a4ad228b993df2a, server=08a7f35e60d4,42609,1731992231509 in 190 msec 2024-11-19T04:57:25,408 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e3da184731fb44e76a4ad228b993df2a, ASSIGN in 349 msec 2024-11-19T04:57:25,410 DEBUG [StoreOpener-4979a9be9a43e82c8b5cdff164028294-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/4979a9be9a43e82c8b5cdff164028294/info/cd5fb7f44eae4692af0a680415b1cc0c.561a52b828b94f12e63c17503cb73505->hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/cd5fb7f44eae4692af0a680415b1cc0c-bottom 2024-11-19T04:57:25,411 INFO [StoreOpener-4979a9be9a43e82c8b5cdff164028294-1 {}] regionserver.HStore(327): Store=4979a9be9a43e82c8b5cdff164028294/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T04:57:25,411 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 4979a9be9a43e82c8b5cdff164028294 2024-11-19T04:57:25,412 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/4979a9be9a43e82c8b5cdff164028294 2024-11-19T04:57:25,413 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/4979a9be9a43e82c8b5cdff164028294 2024-11-19T04:57:25,414 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 4979a9be9a43e82c8b5cdff164028294 2024-11-19T04:57:25,414 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, 
pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 4979a9be9a43e82c8b5cdff164028294 2024-11-19T04:57:25,416 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 4979a9be9a43e82c8b5cdff164028294 2024-11-19T04:57:25,417 INFO [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 4979a9be9a43e82c8b5cdff164028294; next sequenceid=89; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=778065, jitterRate=-0.010639667510986328}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T04:57:25,417 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4979a9be9a43e82c8b5cdff164028294 2024-11-19T04:57:25,417 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 4979a9be9a43e82c8b5cdff164028294: Running coprocessor pre-open hook at 1731992245398Writing region info on filesystem at 1731992245398Initializing all the Stores at 1731992245398Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992245398Cleaning up temporary data from old regions at 1731992245414 (+16 ms)Running coprocessor post-open hooks at 1731992245417 (+3 ms)Region opened successfully at 1731992245417 2024-11-19T04:57:25,418 INFO [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731992244704.4979a9be9a43e82c8b5cdff164028294., pid=13, masterSystemTime=1731992245365 2024-11-19T04:57:25,418 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 4979a9be9a43e82c8b5cdff164028294:info, priority=-2147483648, current under compaction store size is 2 2024-11-19T04:57:25,418 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T04:57:25,418 DEBUG [RS:0;08a7f35e60d4:42609-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-19T04:57:25,419 INFO [RS:0;08a7f35e60d4:42609-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731992244704.4979a9be9a43e82c8b5cdff164028294. 
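Note: the long-compaction thread above selects the single "-bottom" reference file of daughter 4979a9be9a43e82c8b5cdff164028294 for a system-requested minor compaction with elevated priority because the region was just split. For comparison, a minimal client-side sketch that requests a major compaction and polls the table's compaction state; the class name and connection settings are assumptions, and this is not what the test itself does.

    // Sketch only: request a major compaction and wait for it to finish.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactAndWait {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("TestLogRolling-testLogRolling");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.majorCompact(tn);                                // asynchronous request
          while (admin.getCompactionState(tn) != CompactionState.NONE) {
            Thread.sleep(200);                                   // poll until MAJOR/MINOR -> NONE
          }
        }
      }
    }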
2024-11-19T04:57:25,419 DEBUG [RS:0;08a7f35e60d4:42609-longCompactions-0 {}] regionserver.HStore(1541): 4979a9be9a43e82c8b5cdff164028294/info is initiating minor compaction (all files) 2024-11-19T04:57:25,419 INFO [RS:0;08a7f35e60d4:42609-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4979a9be9a43e82c8b5cdff164028294/info in TestLogRolling-testLogRolling,,1731992244704.4979a9be9a43e82c8b5cdff164028294. 2024-11-19T04:57:25,419 INFO [RS:0;08a7f35e60d4:42609-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/4979a9be9a43e82c8b5cdff164028294/info/cd5fb7f44eae4692af0a680415b1cc0c.561a52b828b94f12e63c17503cb73505->hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/cd5fb7f44eae4692af0a680415b1cc0c-bottom] into tmpdir=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/4979a9be9a43e82c8b5cdff164028294/.tmp, totalSize=71.5 K 2024-11-19T04:57:25,420 DEBUG [RS:0;08a7f35e60d4:42609-longCompactions-0 {}] compactions.Compactor(225): Compacting cd5fb7f44eae4692af0a680415b1cc0c.561a52b828b94f12e63c17503cb73505, keycount=31, bloomtype=ROW, size=71.5 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1731992242476 2024-11-19T04:57:25,421 DEBUG [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731992244704.4979a9be9a43e82c8b5cdff164028294. 2024-11-19T04:57:25,421 INFO [RS_OPEN_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731992244704.4979a9be9a43e82c8b5cdff164028294. 2024-11-19T04:57:25,422 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=4979a9be9a43e82c8b5cdff164028294, regionState=OPEN, openSeqNum=89, regionLocation=08a7f35e60d4,42609,1731992231509 2024-11-19T04:57:25,425 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/hbase/meta/1588230740/.tmp/info/d3843dd75daf4ee4a5ac6df6539f4e49 is 193, key is TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a./info:regioninfo/1731992245398/Put/seqid=0 2024-11-19T04:57:25,425 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3da184731fb44e76a4ad228b993df2a#info#compaction#62 average throughput is 1.54 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T04:57:25,425 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4979a9be9a43e82c8b5cdff164028294, server=08a7f35e60d4,42609,1731992231509 because future has completed 2024-11-19T04:57:25,427 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/95daf33c87514c6eb4d8f5effa59c31d is 1080, key is row0062/info:/1731992244598/Put/seqid=0 2024-11-19T04:57:25,437 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=10 2024-11-19T04:57:25,437 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 4979a9be9a43e82c8b5cdff164028294, server=08a7f35e60d4,42609,1731992231509 in 218 msec 2024-11-19T04:57:25,442 INFO [RS:0;08a7f35e60d4:42609-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4979a9be9a43e82c8b5cdff164028294#info#compaction#63 average throughput is 31.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T04:57:25,443 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=7 2024-11-19T04:57:25,443 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4979a9be9a43e82c8b5cdff164028294, ASSIGN in 380 msec 2024-11-19T04:57:25,443 DEBUG [RS:0;08a7f35e60d4:42609-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/4979a9be9a43e82c8b5cdff164028294/.tmp/info/7ffcc281b05b4af68dc80f89da736a54 is 1080, key is row0001/info:/1731992242476/Put/seqid=0 2024-11-19T04:57:25,445 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=561a52b828b94f12e63c17503cb73505, daughterA=4979a9be9a43e82c8b5cdff164028294, daughterB=e3da184731fb44e76a4ad228b993df2a in 739 msec 2024-11-19T04:57:25,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741850_1026 (size=8359) 2024-11-19T04:57:25,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741850_1026 (size=8359) 2024-11-19T04:57:25,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741849_1025 (size=9882) 2024-11-19T04:57:25,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741849_1025 (size=9882) 2024-11-19T04:57:25,455 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), 
to=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/hbase/meta/1588230740/.tmp/info/d3843dd75daf4ee4a5ac6df6539f4e49 2024-11-19T04:57:25,458 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/95daf33c87514c6eb4d8f5effa59c31d as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/95daf33c87514c6eb4d8f5effa59c31d 2024-11-19T04:57:25,466 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 2 (all) file(s) in e3da184731fb44e76a4ad228b993df2a/info of e3da184731fb44e76a4ad228b993df2a into 95daf33c87514c6eb4d8f5effa59c31d(size=8.2 K), total size for store is 8.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T04:57:25,466 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e3da184731fb44e76a4ad228b993df2a: 2024-11-19T04:57:25,466 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a., storeName=e3da184731fb44e76a4ad228b993df2a/info, priority=14, startTime=1731992245394; duration=0sec 2024-11-19T04:57:25,466 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T04:57:25,466 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3da184731fb44e76a4ad228b993df2a:info 2024-11-19T04:57:25,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741851_1027 (size=70862) 2024-11-19T04:57:25,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741851_1027 (size=70862) 2024-11-19T04:57:25,477 DEBUG [RS:0;08a7f35e60d4:42609-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/4979a9be9a43e82c8b5cdff164028294/.tmp/info/7ffcc281b05b4af68dc80f89da736a54 as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/4979a9be9a43e82c8b5cdff164028294/info/7ffcc281b05b4af68dc80f89da736a54 2024-11-19T04:57:25,484 INFO [RS:0;08a7f35e60d4:42609-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 4979a9be9a43e82c8b5cdff164028294/info of 4979a9be9a43e82c8b5cdff164028294 into 7ffcc281b05b4af68dc80f89da736a54(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
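Note: by this point both daughter compactions have committed their rewritten files and the SplitTableRegionProcedure (pid=7) for parent 561a52b828b94f12e63c17503cb73505 has finished. A minimal sketch of driving the same kind of split from a client follows, assuming default connection configuration and using the split point 'row0062' visible in the daughter region names; the polling loop is a simplification, not the test's own wait logic.

    // Sketch only: split a table at an explicit row key and wait for the daughters.
    import java.util.List;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SplitAndWait {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("TestLogRolling-testLogRolling");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin();
             RegionLocator locator = conn.getRegionLocator(tn)) {
          admin.split(tn, Bytes.toBytes("row0062"));   // asynchronous split request at an explicit point
          List<HRegionLocation> locs;
          do {
            Thread.sleep(200);                         // crude wait for the two daughters to be online
            locs = locator.getAllRegionLocations();
          } while (locs.size() < 2);
          locs.forEach(l -> System.out.println(l.getRegion().getRegionNameAsString()));
        }
      }
    }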
2024-11-19T04:57:25,484 DEBUG [RS:0;08a7f35e60d4:42609-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4979a9be9a43e82c8b5cdff164028294: 2024-11-19T04:57:25,484 INFO [RS:0;08a7f35e60d4:42609-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731992244704.4979a9be9a43e82c8b5cdff164028294., storeName=4979a9be9a43e82c8b5cdff164028294/info, priority=15, startTime=1731992245418; duration=0sec 2024-11-19T04:57:25,484 DEBUG [RS:0;08a7f35e60d4:42609-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T04:57:25,484 DEBUG [RS:0;08a7f35e60d4:42609-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4979a9be9a43e82c8b5cdff164028294:info 2024-11-19T04:57:25,490 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/hbase/meta/1588230740/.tmp/ns/b53910b3c5b24d03b67b3e6b51d35e76 is 43, key is default/ns:d/1731992232374/Put/seqid=0 2024-11-19T04:57:25,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741852_1028 (size=5153) 2024-11-19T04:57:25,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741852_1028 (size=5153) 2024-11-19T04:57:25,499 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/hbase/meta/1588230740/.tmp/ns/b53910b3c5b24d03b67b3e6b51d35e76 2024-11-19T04:57:25,523 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/hbase/meta/1588230740/.tmp/table/f0b6535bd6934224ae0f60a1bfe3bc05 is 65, key is TestLogRolling-testLogRolling/table:state/1731992232803/Put/seqid=0 2024-11-19T04:57:25,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741853_1029 (size=5340) 2024-11-19T04:57:25,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741853_1029 (size=5340) 2024-11-19T04:57:25,530 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/hbase/meta/1588230740/.tmp/table/f0b6535bd6934224ae0f60a1bfe3bc05 2024-11-19T04:57:25,536 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/hbase/meta/1588230740/.tmp/info/d3843dd75daf4ee4a5ac6df6539f4e49 as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/hbase/meta/1588230740/info/d3843dd75daf4ee4a5ac6df6539f4e49 2024-11-19T04:57:25,541 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/hbase/meta/1588230740/info/d3843dd75daf4ee4a5ac6df6539f4e49, 
entries=30, sequenceid=17, filesize=9.7 K 2024-11-19T04:57:25,542 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/hbase/meta/1588230740/.tmp/ns/b53910b3c5b24d03b67b3e6b51d35e76 as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/hbase/meta/1588230740/ns/b53910b3c5b24d03b67b3e6b51d35e76 2024-11-19T04:57:25,549 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/hbase/meta/1588230740/ns/b53910b3c5b24d03b67b3e6b51d35e76, entries=2, sequenceid=17, filesize=5.0 K 2024-11-19T04:57:25,550 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/hbase/meta/1588230740/.tmp/table/f0b6535bd6934224ae0f60a1bfe3bc05 as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/hbase/meta/1588230740/table/f0b6535bd6934224ae0f60a1bfe3bc05 2024-11-19T04:57:25,556 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/hbase/meta/1588230740/table/f0b6535bd6934224ae0f60a1bfe3bc05, entries=2, sequenceid=17, filesize=5.2 K 2024-11-19T04:57:25,557 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 157ms, sequenceid=17, compaction requested=false 2024-11-19T04:57:25,558 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-19T04:57:26,176 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:26,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:26,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42609 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:40418 deadline: 1731992256603, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505. 
is not online on 08a7f35e60d4,42609,1731992231509 2024-11-19T04:57:26,629 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505., hostname=08a7f35e60d4,42609,1731992231509, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505., hostname=08a7f35e60d4,42609,1731992231509, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505. is not online on 08a7f35e60d4,42609,1731992231509 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-19T04:57:26,630 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505., hostname=08a7f35e60d4,42609,1731992231509, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505. is not online on 08a7f35e60d4,42609,1731992231509 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-19T04:57:26,630 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731992232438.561a52b828b94f12e63c17503cb73505., hostname=08a7f35e60d4,42609,1731992231509, seqNum=2 from cache 2024-11-19T04:57:27,177 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:27,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:28,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:28,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:29,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:29,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:30,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:30,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:30,436 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T04:57:30,437 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:30,437 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:30,437 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:30,437 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:30,437 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:30,438 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:30,460 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:30,461 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:30,461 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:30,461 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:30,461 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:30,462 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:30,465 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:30,465 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:30,466 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:30,468 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T04:57:31,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:31,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:32,180 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:32,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:33,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:33,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:34,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:34,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:35,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:35,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:36,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:36,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-19T04:57:36,655 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0065', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a., hostname=08a7f35e60d4,42609,1731992231509, seqNum=89]
2024-11-19T04:57:36,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42609 {}] regionserver.HRegion(8855): Flush requested on e3da184731fb44e76a4ad228b993df2a
2024-11-19T04:57:36,668 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e3da184731fb44e76a4ad228b993df2a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-19T04:57:36,673 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/9d972cef1772428a91f5157e51b15e8e is 1080, key is row0065/info:/1731992256656/Put/seqid=0
2024-11-19T04:57:36,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741854_1030 (size=12509)
2024-11-19T04:57:36,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741854_1030 (size=12509)
2024-11-19T04:57:36,679 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=99 (bloomFilter=true), to=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/9d972cef1772428a91f5157e51b15e8e
2024-11-19T04:57:36,686 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/9d972cef1772428a91f5157e51b15e8e as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/9d972cef1772428a91f5157e51b15e8e
2024-11-19T04:57:36,692 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/9d972cef1772428a91f5157e51b15e8e, entries=7, sequenceid=99, filesize=12.2 K
2024-11-19T04:57:36,693 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for e3da184731fb44e76a4ad228b993df2a in 26ms, sequenceid=99, compaction requested=false
2024-11-19T04:57:36,693 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e3da184731fb44e76a4ad228b993df2a:
2024-11-19T04:57:36,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42609 {}] regionserver.HRegion(8855): Flush requested on e3da184731fb44e76a4ad228b993df2a
2024-11-19T04:57:36,695 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e3da184731fb44e76a4ad228b993df2a 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB
2024-11-19T04:57:36,699 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/66e76b7f2adb4dd5b49641cb516405e7 is 1080, key is row0072/info:/1731992256668/Put/seqid=0
2024-11-19T04:57:36,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741855_1031 (size=21141)
2024-11-19T04:57:36,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741855_1031 (size=21141)
2024-11-19T04:57:36,713 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/66e76b7f2adb4dd5b49641cb516405e7
2024-11-19T04:57:36,719 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/66e76b7f2adb4dd5b49641cb516405e7 as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/66e76b7f2adb4dd5b49641cb516405e7
2024-11-19T04:57:36,725 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/66e76b7f2adb4dd5b49641cb516405e7, entries=15, sequenceid=117, filesize=20.6 K
2024-11-19T04:57:36,726 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=10.51 KB/10760 for e3da184731fb44e76a4ad228b993df2a in 32ms, sequenceid=117, compaction requested=true
2024-11-19T04:57:36,726 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e3da184731fb44e76a4ad228b993df2a:
2024-11-19T04:57:36,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3da184731fb44e76a4ad228b993df2a:info, priority=-2147483648, current under compaction store size is 1
2024-11-19T04:57:36,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-19T04:57:36,727 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-19T04:57:36,728 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42009 starting at candidate #0 after considering 1
permutations with 1 in ratio
2024-11-19T04:57:36,728 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HStore(1541): e3da184731fb44e76a4ad228b993df2a/info is initiating minor compaction (all files)
2024-11-19T04:57:36,728 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e3da184731fb44e76a4ad228b993df2a/info in TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.
2024-11-19T04:57:36,728 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/95daf33c87514c6eb4d8f5effa59c31d, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/9d972cef1772428a91f5157e51b15e8e, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/66e76b7f2adb4dd5b49641cb516405e7] into tmpdir=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp, totalSize=41.0 K
2024-11-19T04:57:36,729 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 95daf33c87514c6eb4d8f5effa59c31d, keycount=3, bloomtype=ROW, size=8.2 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1731992244598
2024-11-19T04:57:36,729 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9d972cef1772428a91f5157e51b15e8e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1731992256656
2024-11-19T04:57:36,730 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 66e76b7f2adb4dd5b49641cb516405e7, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1731992256668
2024-11-19T04:57:36,743 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3da184731fb44e76a4ad228b993df2a#info#compaction#68 average throughput is 12.83 MB/second, slept 0 time(s) and total slept time is 0 ms.
0 active operations remaining, total limit is 50.00 MB/second
2024-11-19T04:57:36,743 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/7901d326b9ff4e169f263abedfdcdaba is 1080, key is row0062/info:/1731992244598/Put/seqid=0
2024-11-19T04:57:36,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741856_1032 (size=32183)
2024-11-19T04:57:36,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741856_1032 (size=32183)
2024-11-19T04:57:36,756 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/7901d326b9ff4e169f263abedfdcdaba as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/7901d326b9ff4e169f263abedfdcdaba
2024-11-19T04:57:36,764 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e3da184731fb44e76a4ad228b993df2a/info of e3da184731fb44e76a4ad228b993df2a into 7901d326b9ff4e169f263abedfdcdaba(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-19T04:57:36,764 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e3da184731fb44e76a4ad228b993df2a:
2024-11-19T04:57:36,764 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a., storeName=e3da184731fb44e76a4ad228b993df2a/info, priority=13, startTime=1731992256726; duration=0sec
2024-11-19T04:57:36,764 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-19T04:57:36,764 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3da184731fb44e76a4ad228b993df2a:info
2024-11-19T04:57:37,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:37,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:38,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:38,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:38,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42609 {}] regionserver.HRegion(8855): Flush requested on e3da184731fb44e76a4ad228b993df2a 2024-11-19T04:57:38,714 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e3da184731fb44e76a4ad228b993df2a 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-19T04:57:38,719 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/5db4ff5f649d49069bae012b6ba78cc1 is 1080, key is row0087/info:/1731992256695/Put/seqid=0 2024-11-19T04:57:38,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741857_1033 (size=16819) 2024-11-19T04:57:38,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741857_1033 (size=16819) 2024-11-19T04:57:38,727 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/5db4ff5f649d49069bae012b6ba78cc1 2024-11-19T04:57:38,734 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/5db4ff5f649d49069bae012b6ba78cc1 as 
hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/5db4ff5f649d49069bae012b6ba78cc1 2024-11-19T04:57:38,740 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/5db4ff5f649d49069bae012b6ba78cc1, entries=11, sequenceid=132, filesize=16.4 K 2024-11-19T04:57:38,742 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=13.66 KB/13988 for e3da184731fb44e76a4ad228b993df2a in 27ms, sequenceid=132, compaction requested=false 2024-11-19T04:57:38,742 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e3da184731fb44e76a4ad228b993df2a: 2024-11-19T04:57:38,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42609 {}] regionserver.HRegion(8855): Flush requested on e3da184731fb44e76a4ad228b993df2a 2024-11-19T04:57:38,742 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e3da184731fb44e76a4ad228b993df2a 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-19T04:57:38,747 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/bda441dd6bfa4ddea87aa505129ea67a is 1080, key is row0098/info:/1731992258716/Put/seqid=0 2024-11-19T04:57:38,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741858_1034 (size=20078) 2024-11-19T04:57:38,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741858_1034 (size=20078) 2024-11-19T04:57:38,755 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=149 (bloomFilter=true), to=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/bda441dd6bfa4ddea87aa505129ea67a 2024-11-19T04:57:38,762 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/bda441dd6bfa4ddea87aa505129ea67a as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/bda441dd6bfa4ddea87aa505129ea67a 2024-11-19T04:57:38,767 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/bda441dd6bfa4ddea87aa505129ea67a, entries=14, sequenceid=149, filesize=19.6 K 2024-11-19T04:57:38,769 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=12.61 KB/12912 for e3da184731fb44e76a4ad228b993df2a in 27ms, sequenceid=149, compaction requested=true 2024-11-19T04:57:38,769 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for e3da184731fb44e76a4ad228b993df2a: 2024-11-19T04:57:38,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3da184731fb44e76a4ad228b993df2a:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T04:57:38,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T04:57:38,769 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T04:57:38,769 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e3da184731fb44e76a4ad228b993df2a 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-19T04:57:38,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42609 {}] regionserver.HRegion(8855): Flush requested on e3da184731fb44e76a4ad228b993df2a 2024-11-19T04:57:38,770 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 69080 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T04:57:38,771 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HStore(1541): e3da184731fb44e76a4ad228b993df2a/info is initiating minor compaction (all files) 2024-11-19T04:57:38,771 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e3da184731fb44e76a4ad228b993df2a/info in TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a. 2024-11-19T04:57:38,771 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/7901d326b9ff4e169f263abedfdcdaba, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/5db4ff5f649d49069bae012b6ba78cc1, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/bda441dd6bfa4ddea87aa505129ea67a] into tmpdir=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp, totalSize=67.5 K 2024-11-19T04:57:38,771 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7901d326b9ff4e169f263abedfdcdaba, keycount=25, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1731992244598 2024-11-19T04:57:38,772 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5db4ff5f649d49069bae012b6ba78cc1, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1731992256695 2024-11-19T04:57:38,773 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.Compactor(225): Compacting bda441dd6bfa4ddea87aa505129ea67a, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=149, earliestPutTs=1731992258716 2024-11-19T04:57:38,782 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/d28e764bbd7c4819b2941d7679e309a4 is 1080, key is row0112/info:/1731992258743/Put/seqid=0 2024-11-19T04:57:38,787 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3da184731fb44e76a4ad228b993df2a#info#compaction#72 average throughput is 51.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T04:57:38,788 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/f21f2cf5f5b44ab78ce5b84ae1919c73 is 1080, key is row0062/info:/1731992244598/Put/seqid=0 2024-11-19T04:57:38,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741859_1035 (size=19000) 2024-11-19T04:57:38,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741859_1035 (size=19000) 2024-11-19T04:57:38,792 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=165 (bloomFilter=true), to=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/d28e764bbd7c4819b2941d7679e309a4 2024-11-19T04:57:38,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741860_1036 (size=59266) 2024-11-19T04:57:38,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741860_1036 (size=59266) 2024-11-19T04:57:38,799 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/d28e764bbd7c4819b2941d7679e309a4 as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/d28e764bbd7c4819b2941d7679e309a4 2024-11-19T04:57:38,801 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/f21f2cf5f5b44ab78ce5b84ae1919c73 as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/f21f2cf5f5b44ab78ce5b84ae1919c73 2024-11-19T04:57:38,805 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/d28e764bbd7c4819b2941d7679e309a4, entries=13, sequenceid=165, filesize=18.6 K 2024-11-19T04:57:38,813 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=4.20 KB/4304 for e3da184731fb44e76a4ad228b993df2a in 44ms, sequenceid=165, compaction requested=false 2024-11-19T04:57:38,813 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e3da184731fb44e76a4ad228b993df2a: 2024-11-19T04:57:38,813 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e3da184731fb44e76a4ad228b993df2a/info of e3da184731fb44e76a4ad228b993df2a into f21f2cf5f5b44ab78ce5b84ae1919c73(size=57.9 K), total size for store is 76.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T04:57:38,813 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e3da184731fb44e76a4ad228b993df2a: 2024-11-19T04:57:38,813 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a., storeName=e3da184731fb44e76a4ad228b993df2a/info, priority=13, startTime=1731992258769; duration=0sec 2024-11-19T04:57:38,813 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T04:57:38,813 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3da184731fb44e76a4ad228b993df2a:info 2024-11-19T04:57:39,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:39,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:40,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:40,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:40,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42609 {}] regionserver.HRegion(8855): Flush requested on e3da184731fb44e76a4ad228b993df2a 2024-11-19T04:57:40,785 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e3da184731fb44e76a4ad228b993df2a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T04:57:40,790 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/5b9b688b59524aafb63454b8fa9013ad is 1080, key is row0125/info:/1731992258771/Put/seqid=0 2024-11-19T04:57:40,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741861_1037 (size=12516) 2024-11-19T04:57:40,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741861_1037 (size=12516) 2024-11-19T04:57:40,797 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/5b9b688b59524aafb63454b8fa9013ad 2024-11-19T04:57:40,803 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/5b9b688b59524aafb63454b8fa9013ad as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/5b9b688b59524aafb63454b8fa9013ad 2024-11-19T04:57:40,809 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/5b9b688b59524aafb63454b8fa9013ad, entries=7, sequenceid=176, filesize=12.2 K 2024-11-19T04:57:40,810 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for e3da184731fb44e76a4ad228b993df2a in 24ms, sequenceid=176, compaction requested=true 2024-11-19T04:57:40,810 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e3da184731fb44e76a4ad228b993df2a: 2024-11-19T04:57:40,810 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3da184731fb44e76a4ad228b993df2a:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T04:57:40,810 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T04:57:40,810 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T04:57:40,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42609 {}] regionserver.HRegion(8855): Flush requested on e3da184731fb44e76a4ad228b993df2a 2024-11-19T04:57:40,811 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e3da184731fb44e76a4ad228b993df2a 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-19T04:57:40,811 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 90782 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T04:57:40,811 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HStore(1541): e3da184731fb44e76a4ad228b993df2a/info is initiating minor compaction (all files) 2024-11-19T04:57:40,812 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e3da184731fb44e76a4ad228b993df2a/info in TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a. 2024-11-19T04:57:40,812 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/f21f2cf5f5b44ab78ce5b84ae1919c73, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/d28e764bbd7c4819b2941d7679e309a4, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/5b9b688b59524aafb63454b8fa9013ad] into tmpdir=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp, totalSize=88.7 K 2024-11-19T04:57:40,812 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.Compactor(225): Compacting f21f2cf5f5b44ab78ce5b84ae1919c73, keycount=50, bloomtype=ROW, size=57.9 K, encoding=NONE, compression=NONE, seqNum=149, earliestPutTs=1731992244598 2024-11-19T04:57:40,813 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.Compactor(225): Compacting d28e764bbd7c4819b2941d7679e309a4, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1731992258743 2024-11-19T04:57:40,813 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5b9b688b59524aafb63454b8fa9013ad, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1731992258771 2024-11-19T04:57:40,816 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/5fb3220cb382473faddc14cd8ba4eb7a is 1080, key is row0132/info:/1731992260786/Put/seqid=0 2024-11-19T04:57:40,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to 
blk_1073741862_1038 (size=21156) 2024-11-19T04:57:40,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741862_1038 (size=21156) 2024-11-19T04:57:40,835 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3da184731fb44e76a4ad228b993df2a#info#compaction#75 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T04:57:40,836 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/fe9a3b7228fa426094e5b428c041c9d9 is 1080, key is row0062/info:/1731992244598/Put/seqid=0 2024-11-19T04:57:40,836 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/5fb3220cb382473faddc14cd8ba4eb7a 2024-11-19T04:57:40,844 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/5fb3220cb382473faddc14cd8ba4eb7a as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/5fb3220cb382473faddc14cd8ba4eb7a 2024-11-19T04:57:40,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741863_1039 (size=81065) 2024-11-19T04:57:40,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741863_1039 (size=81065) 2024-11-19T04:57:40,850 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/5fb3220cb382473faddc14cd8ba4eb7a, entries=15, sequenceid=194, filesize=20.7 K 2024-11-19T04:57:40,851 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=14.71 KB/15064 for e3da184731fb44e76a4ad228b993df2a in 40ms, sequenceid=194, compaction requested=false 2024-11-19T04:57:40,852 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e3da184731fb44e76a4ad228b993df2a: 2024-11-19T04:57:40,853 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/fe9a3b7228fa426094e5b428c041c9d9 as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/fe9a3b7228fa426094e5b428c041c9d9 2024-11-19T04:57:40,860 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction 
of 3 (all) file(s) in e3da184731fb44e76a4ad228b993df2a/info of e3da184731fb44e76a4ad228b993df2a into fe9a3b7228fa426094e5b428c041c9d9(size=79.2 K), total size for store is 99.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T04:57:40,860 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e3da184731fb44e76a4ad228b993df2a: 2024-11-19T04:57:40,860 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a., storeName=e3da184731fb44e76a4ad228b993df2a/info, priority=13, startTime=1731992260810; duration=0sec 2024-11-19T04:57:40,860 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T04:57:40,860 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3da184731fb44e76a4ad228b993df2a:info 2024-11-19T04:57:41,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:57:41,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:41,440 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-19T04:57:42,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:42,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:42,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42609 {}] regionserver.HRegion(8855): Flush requested on e3da184731fb44e76a4ad228b993df2a 2024-11-19T04:57:42,844 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e3da184731fb44e76a4ad228b993df2a 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-19T04:57:42,849 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/4e8b734eac174d4d9d483499ade2ee6b is 1080, key is row0147/info:/1731992260813/Put/seqid=0 2024-11-19T04:57:42,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741864_1040 (size=21156) 2024-11-19T04:57:42,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741864_1040 (size=21156) 2024-11-19T04:57:42,862 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/4e8b734eac174d4d9d483499ade2ee6b 2024-11-19T04:57:42,868 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/4e8b734eac174d4d9d483499ade2ee6b as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/4e8b734eac174d4d9d483499ade2ee6b 2024-11-19T04:57:42,868 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42609 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=e3da184731fb44e76a4ad228b993df2a, server=08a7f35e60d4,42609,1731992231509 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-19T04:57:42,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42609 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:40418 deadline: 1731992272868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=e3da184731fb44e76a4ad228b993df2a, server=08a7f35e60d4,42609,1731992231509 2024-11-19T04:57:42,869 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a., hostname=08a7f35e60d4,42609,1731992231509, seqNum=89 , the old value is region=TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a., hostname=08a7f35e60d4,42609,1731992231509, seqNum=89, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=e3da184731fb44e76a4ad228b993df2a, server=08a7f35e60d4,42609,1731992231509 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-19T04:57:42,869 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a., hostname=08a7f35e60d4,42609,1731992231509, seqNum=89 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=e3da184731fb44e76a4ad228b993df2a, server=08a7f35e60d4,42609,1731992231509 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-19T04:57:42,869 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a., hostname=08a7f35e60d4,42609,1731992231509, seqNum=89 because the exception is null or not the one we care about 2024-11-19T04:57:42,873 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/4e8b734eac174d4d9d483499ade2ee6b, entries=15, sequenceid=213, filesize=20.7 K 2024-11-19T04:57:42,874 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=14.71 KB/15064 for e3da184731fb44e76a4ad228b993df2a in 30ms, sequenceid=213, compaction requested=true 2024-11-19T04:57:42,874 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e3da184731fb44e76a4ad228b993df2a: 2024-11-19T04:57:42,874 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3da184731fb44e76a4ad228b993df2a:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T04:57:42,874 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T04:57:42,874 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T04:57:42,875 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 123377 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T04:57:42,875 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HStore(1541): e3da184731fb44e76a4ad228b993df2a/info is initiating minor compaction (all files) 2024-11-19T04:57:42,875 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e3da184731fb44e76a4ad228b993df2a/info in TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a. 
2024-11-19T04:57:42,876 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/fe9a3b7228fa426094e5b428c041c9d9, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/5fb3220cb382473faddc14cd8ba4eb7a, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/4e8b734eac174d4d9d483499ade2ee6b] into tmpdir=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp, totalSize=120.5 K 2024-11-19T04:57:42,876 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.Compactor(225): Compacting fe9a3b7228fa426094e5b428c041c9d9, keycount=70, bloomtype=ROW, size=79.2 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1731992244598 2024-11-19T04:57:42,876 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5fb3220cb382473faddc14cd8ba4eb7a, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1731992260786 2024-11-19T04:57:42,877 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4e8b734eac174d4d9d483499ade2ee6b, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1731992260813 2024-11-19T04:57:42,889 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3da184731fb44e76a4ad228b993df2a#info#compaction#77 average throughput is 51.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T04:57:42,890 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/1f17e3dedb3c485ba6d7da4892e1291f is 1080, key is row0062/info:/1731992244598/Put/seqid=0 2024-11-19T04:57:42,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741865_1041 (size=113515) 2024-11-19T04:57:42,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741865_1041 (size=113515) 2024-11-19T04:57:42,901 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/1f17e3dedb3c485ba6d7da4892e1291f as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/1f17e3dedb3c485ba6d7da4892e1291f 2024-11-19T04:57:42,907 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e3da184731fb44e76a4ad228b993df2a/info of e3da184731fb44e76a4ad228b993df2a into 1f17e3dedb3c485ba6d7da4892e1291f(size=110.9 K), total size for store is 110.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T04:57:42,907 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e3da184731fb44e76a4ad228b993df2a: 2024-11-19T04:57:42,907 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a., storeName=e3da184731fb44e76a4ad228b993df2a/info, priority=13, startTime=1731992262874; duration=0sec 2024-11-19T04:57:42,907 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T04:57:42,907 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3da184731fb44e76a4ad228b993df2a:info 2024-11-19T04:57:43,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:43,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:44,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:44,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-19T04:57:45,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 [java.lang.reflect.InvocationTargetException: null; caused by java.io.IOException: Filesystem closed -- stack trace identical to the first occurrence above]
2024-11-19T04:57:45,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta [java.lang.reflect.InvocationTargetException: null; caused by java.io.IOException: Filesystem closed -- stack trace identical to the first occurrence above]
2024-11-19T04:57:46,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 [java.lang.reflect.InvocationTargetException: null; caused by java.io.IOException: Filesystem closed -- stack trace identical to the first occurrence above]
2024-11-19T04:57:46,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta [java.lang.reflect.InvocationTargetException: null; caused by java.io.IOException: Filesystem closed -- stack trace identical to the first occurrence above]
2024-11-19T04:57:47,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 [java.lang.reflect.InvocationTargetException: null; caused by java.io.IOException: Filesystem closed -- stack trace identical to the first occurrence above]
2024-11-19T04:57:47,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta [java.lang.reflect.InvocationTargetException: null; caused by java.io.IOException: Filesystem closed -- stack trace identical to the first occurrence above]
2024-11-19T04:57:48,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 [java.lang.reflect.InvocationTargetException: null; caused by java.io.IOException: Filesystem closed -- stack trace identical to the first occurrence above]
2024-11-19T04:57:48,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta [java.lang.reflect.InvocationTargetException: null; caused by java.io.IOException: Filesystem closed -- stack trace identical to the first occurrence above]
2024-11-19T04:57:49,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 [java.lang.reflect.InvocationTargetException: null; caused by java.io.IOException: Filesystem closed -- stack trace identical to the first occurrence above]
2024-11-19T04:57:49,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta [java.lang.reflect.InvocationTargetException: null; caused by java.io.IOException: Filesystem closed -- stack trace identical to the first occurrence above]
2024-11-19T04:57:50,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 [java.lang.reflect.InvocationTargetException: null; caused by java.io.IOException: Filesystem closed -- stack trace identical to the first occurrence above]
2024-11-19T04:57:50,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta [java.lang.reflect.InvocationTargetException: null; caused by java.io.IOException: Filesystem closed -- stack trace identical to the first occurrence above]
2024-11-19T04:57:51,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 [java.lang.reflect.InvocationTargetException: null; caused by java.io.IOException: Filesystem closed -- stack trace identical to the first occurrence above]
2024-11-19T04:57:51,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta [java.lang.reflect.InvocationTargetException: null; caused by java.io.IOException: Filesystem closed -- stack trace identical to the first occurrence above]
2024-11-19T04:57:52,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 [java.lang.reflect.InvocationTargetException: null; caused by java.io.IOException: Filesystem closed -- stack trace identical to the first occurrence above]
2024-11-19T04:57:52,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta [java.lang.reflect.InvocationTargetException: null; caused by java.io.IOException: Filesystem closed -- stack trace identical to the first occurrence above]
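The run of warnings above comes from RecoverLeaseFSUtils repeatedly probing whether the old WAL file is closed: the probe goes through reflection against a DistributedFileSystem whose DFSClient has already been shut down, so every attempt surfaces as an InvocationTargetException wrapping java.io.IOException: Filesystem closed, and the retries can never succeed. Below is a minimal sketch of that failure mode; the cluster URI, the WAL path, and the class name are placeholders for illustration (not values from this log), and the reflective lookup simply mirrors the Method.invoke frames visible in the trace rather than reproducing RecoverLeaseFSUtils itself.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileClosedProbe {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder mini-cluster URI and WAL path; the log's cluster is hdfs://localhost:41423.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
    Path wal = new Path("/example/wal-file");

    fs.close(); // from here on, the wrapped DFSClient rejects every call with "Filesystem closed"

    try {
      // Call isFileClosed reflectively, mirroring the Method.invoke frames in the warnings above.
      // Reflection wraps the underlying IOException in an InvocationTargetException, which is
      // the exception RecoverLeaseFSUtils logs at RecoverLeaseFSUtils.java:258.
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      boolean closed = (Boolean) isFileClosed.invoke(fs, wal);
      System.out.println("closed=" + closed);
    } catch (InvocationTargetException e) {
      // e.getCause() is java.io.IOException: Filesystem closed
      System.err.println("Failed invocation: " + e.getCause());
    }
  }
}

Because the filesystem client is closed rather than the lease merely being held by another writer, each retry fails the same way, which is why the identical warning repeats at roughly one-second intervals for both WAL files.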
2024-11-19T04:57:52,399 INFO [master/08a7f35e60d4:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-19T04:57:52,399 INFO [master/08a7f35e60d4:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-11-19T04:57:52,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42609 {}] regionserver.HRegion(8855): Flush requested on e3da184731fb44e76a4ad228b993df2a
2024-11-19T04:57:52,951 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e3da184731fb44e76a4ad228b993df2a 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB
2024-11-19T04:57:52,956 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/93697579a27d4c0bba0d6c13bd928d87 is 1080, key is row0162/info:/1731992262845/Put/seqid=0
2024-11-19T04:57:52,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741866_1042 (size=21156)
2024-11-19T04:57:52,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741866_1042 (size=21156)
2024-11-19T04:57:52,963 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/93697579a27d4c0bba0d6c13bd928d87
2024-11-19T04:57:52,973 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/93697579a27d4c0bba0d6c13bd928d87 as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/93697579a27d4c0bba0d6c13bd928d87
2024-11-19T04:57:52,977 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42609 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=e3da184731fb44e76a4ad228b993df2a, server=08a7f35e60d4,42609,1731992231509
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
2024-11-19T04:57:52,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42609 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:40418 deadline: 1731992282976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=e3da184731fb44e76a4ad228b993df2a, server=08a7f35e60d4,42609,1731992231509
2024-11-19T04:57:52,978 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a., hostname=08a7f35e60d4,42609,1731992231509, seqNum=89, the old value is region=TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a., hostname=08a7f35e60d4,42609,1731992231509, seqNum=89, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=e3da184731fb44e76a4ad228b993df2a, server=08a7f35e60d4,42609,1731992231509 [remote stack trace identical to the server-side trace above]
2024-11-19T04:57:52,978 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a., hostname=08a7f35e60d4,42609,1731992231509, seqNum=89 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=e3da184731fb44e76a4ad228b993df2a, server=08a7f35e60d4,42609,1731992231509 [remote stack trace identical to the server-side trace above]
2024-11-19T04:57:52,978 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a., hostname=08a7f35e60d4,42609,1731992231509, seqNum=89 because the exception is null or not the one we care about
2024-11-19T04:57:52,980 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/93697579a27d4c0bba0d6c13bd928d87, entries=15, sequenceid=232, filesize=20.7 K
2024-11-19T04:57:52,981 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=14.71 KB/15064 for e3da184731fb44e76a4ad228b993df2a in 31ms, sequenceid=232, compaction requested=false
2024-11-19T04:57:52,981 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e3da184731fb44e76a4ad228b993df2a:
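The RegionTooBusyException above is HRegion.checkResources pushing back on a put because the region's memstore is over its blocking limit, reported here as 32.0 K; the async client logs the error and, as the "Will not update region=..." line shows, deliberately keeps its cached region location, since a busy region is not a stale location. The blocking limit is the configured memstore flush size multiplied by hbase.hregion.memstore.block.multiplier, so a limit this small is presumably a deliberate test setting. The sketch below shows one configuration that would yield a 32 KB limit; the 8 KB flush size and the class name are assumptions chosen to match the observed limit, not values read from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class TinyMemstoreLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed test-style settings: blocking limit = flush.size * block.multiplier.
    conf.setLong("hbase.hregion.memstore.flush.size", 8 * 1024); // 8 KB flush threshold (assumption)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);   // block writes at 4x the flush size

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
    System.out.println("memstore blocking limit = " + blockingLimit + " bytes"); // 32768 = 32.0 K
  }
}

Once the MemStoreFlusher finishes ("Finished flush ... in 31ms" above), writes to the region are accepted again, so a caller that sees RegionTooBusyException normally just backs off briefly and retries.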
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:53,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:54,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:54,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:55,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:55,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:57:56,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-19T04:57:56,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null
2024-11-19T04:57:57,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null
2024-11-19T04:57:57,341 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20375
2024-11-19T04:57:57,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null
2024-11-19T04:57:58,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null
2024-11-19T04:57:58,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null
2024-11-19T04:57:59,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null
2024-11-19T04:57:59,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null
2024-11-19T04:58:00,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null
2024-11-19T04:58:00,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null
2024-11-19T04:58:01,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null
2024-11-19T04:58:01,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null
2024-11-19T04:58:02,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null
2024-11-19T04:58:02,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null
2024-11-19T04:58:03,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42609 {}] regionserver.HRegion(8855): Flush requested on e3da184731fb44e76a4ad228b993df2a
2024-11-19T04:58:03,000 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e3da184731fb44e76a4ad228b993df2a 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB
2024-11-19T04:58:03,005 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/371f74827edd4060b4767677b9aa5d62 is 1080, key is row0177/info:/1731992272952/Put/seqid=0
2024-11-19T04:58:03,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741867_1043 (size=21156)
2024-11-19T04:58:03,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741867_1043 (size=21156)
2024-11-19T04:58:03,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null
2024-11-19T04:58:03,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null
2024-11-19T04:58:03,415 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/371f74827edd4060b4767677b9aa5d62
2024-11-19T04:58:03,422 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/371f74827edd4060b4767677b9aa5d62 as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/371f74827edd4060b4767677b9aa5d62
2024-11-19T04:58:03,427 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/371f74827edd4060b4767677b9aa5d62, entries=15, sequenceid=250, filesize=20.7 K
2024-11-19T04:58:03,428 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=1.05 KB/1076 for e3da184731fb44e76a4ad228b993df2a in 428ms, sequenceid=250, compaction requested=true
2024-11-19T04:58:03,428 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e3da184731fb44e76a4ad228b993df2a:
2024-11-19T04:58:03,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3da184731fb44e76a4ad228b993df2a:info, priority=-2147483648, current under compaction store size is 1
2024-11-19T04:58:03,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-19T04:58:03,428 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-19T04:58:03,429 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 155827 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-19T04:58:03,429 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HStore(1541): e3da184731fb44e76a4ad228b993df2a/info is initiating minor compaction (all files)
2024-11-19T04:58:03,429 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e3da184731fb44e76a4ad228b993df2a/info in TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.
2024-11-19T04:58:03,429 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/1f17e3dedb3c485ba6d7da4892e1291f, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/93697579a27d4c0bba0d6c13bd928d87, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/371f74827edd4060b4767677b9aa5d62] into tmpdir=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp, totalSize=152.2 K 2024-11-19T04:58:03,430 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1f17e3dedb3c485ba6d7da4892e1291f, keycount=100, bloomtype=ROW, size=110.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1731992244598 2024-11-19T04:58:03,430 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 93697579a27d4c0bba0d6c13bd928d87, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1731992262845 2024-11-19T04:58:03,430 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 371f74827edd4060b4767677b9aa5d62, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1731992272952 2024-11-19T04:58:03,442 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3da184731fb44e76a4ad228b993df2a#info#compaction#80 average throughput is 44.47 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T04:58:03,443 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/fabe563a9ff14ae4b23475e5d7bece6e is 1080, key is row0062/info:/1731992244598/Put/seqid=0 2024-11-19T04:58:03,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741868_1044 (size=146162) 2024-11-19T04:58:03,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741868_1044 (size=146162) 2024-11-19T04:58:03,454 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/fabe563a9ff14ae4b23475e5d7bece6e as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/fabe563a9ff14ae4b23475e5d7bece6e 2024-11-19T04:58:03,461 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e3da184731fb44e76a4ad228b993df2a/info of e3da184731fb44e76a4ad228b993df2a into fabe563a9ff14ae4b23475e5d7bece6e(size=142.7 K), total size for store is 142.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T04:58:03,462 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e3da184731fb44e76a4ad228b993df2a: 2024-11-19T04:58:03,462 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a., storeName=e3da184731fb44e76a4ad228b993df2a/info, priority=13, startTime=1731992283428; duration=0sec 2024-11-19T04:58:03,462 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T04:58:03,462 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3da184731fb44e76a4ad228b993df2a:info 2024-11-19T04:58:04,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:58:04,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:58:05,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42609 {}] regionserver.HRegion(8855): Flush requested on e3da184731fb44e76a4ad228b993df2a 2024-11-19T04:58:05,013 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e3da184731fb44e76a4ad228b993df2a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T04:58:05,018 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/75d1c392fab14f45addf1abc34d55c35 is 1080, key is row0192/info:/1731992283002/Put/seqid=0 2024-11-19T04:58:05,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741869_1045 (size=12521) 2024-11-19T04:58:05,025 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/75d1c392fab14f45addf1abc34d55c35 2024-11-19T04:58:05,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741869_1045 (size=12521) 2024-11-19T04:58:05,031 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/75d1c392fab14f45addf1abc34d55c35 as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/75d1c392fab14f45addf1abc34d55c35 2024-11-19T04:58:05,037 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/75d1c392fab14f45addf1abc34d55c35, entries=7, sequenceid=261, filesize=12.2 K 2024-11-19T04:58:05,038 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for e3da184731fb44e76a4ad228b993df2a in 25ms, sequenceid=261, compaction requested=false 2024-11-19T04:58:05,038 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e3da184731fb44e76a4ad228b993df2a: 2024-11-19T04:58:05,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42609 {}] regionserver.HRegion(8855): Flush requested on e3da184731fb44e76a4ad228b993df2a 2024-11-19T04:58:05,039 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e3da184731fb44e76a4ad228b993df2a 1/1 column families, dataSize=14.71 KB heapSize=16 KB 
2024-11-19T04:58:05,044 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/cda1d805def64391b5b017cc5cc99bfb is 1080, key is row0199/info:/1731992285014/Put/seqid=0 2024-11-19T04:58:05,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741870_1046 (size=20092) 2024-11-19T04:58:05,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741870_1046 (size=20092) 2024-11-19T04:58:05,057 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/cda1d805def64391b5b017cc5cc99bfb 2024-11-19T04:58:05,062 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/cda1d805def64391b5b017cc5cc99bfb as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/cda1d805def64391b5b017cc5cc99bfb 2024-11-19T04:58:05,067 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/cda1d805def64391b5b017cc5cc99bfb, entries=14, sequenceid=278, filesize=19.6 K 2024-11-19T04:58:05,068 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=12.61 KB/12912 for e3da184731fb44e76a4ad228b993df2a in 30ms, sequenceid=278, compaction requested=true 2024-11-19T04:58:05,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e3da184731fb44e76a4ad228b993df2a: 2024-11-19T04:58:05,069 DEBUG [RS:0;08a7f35e60d4:42609-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T04:58:05,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3da184731fb44e76a4ad228b993df2a:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T04:58:05,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T04:58:05,073 DEBUG [RS:0;08a7f35e60d4:42609-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 178775 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T04:58:05,073 DEBUG [RS:0;08a7f35e60d4:42609-longCompactions-0 {}] regionserver.HStore(1541): e3da184731fb44e76a4ad228b993df2a/info is initiating minor compaction (all files) 2024-11-19T04:58:05,073 INFO [RS:0;08a7f35e60d4:42609-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
e3da184731fb44e76a4ad228b993df2a/info in TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a. 2024-11-19T04:58:05,073 INFO [RS:0;08a7f35e60d4:42609-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/fabe563a9ff14ae4b23475e5d7bece6e, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/75d1c392fab14f45addf1abc34d55c35, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/cda1d805def64391b5b017cc5cc99bfb] into tmpdir=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp, totalSize=174.6 K 2024-11-19T04:58:05,074 DEBUG [RS:0;08a7f35e60d4:42609-longCompactions-0 {}] compactions.Compactor(225): Compacting fabe563a9ff14ae4b23475e5d7bece6e, keycount=130, bloomtype=ROW, size=142.7 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1731992244598 2024-11-19T04:58:05,074 DEBUG [RS:0;08a7f35e60d4:42609-longCompactions-0 {}] compactions.Compactor(225): Compacting 75d1c392fab14f45addf1abc34d55c35, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1731992283002 2024-11-19T04:58:05,074 DEBUG [RS:0;08a7f35e60d4:42609-longCompactions-0 {}] compactions.Compactor(225): Compacting cda1d805def64391b5b017cc5cc99bfb, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1731992285014 2024-11-19T04:58:05,095 INFO [RS:0;08a7f35e60d4:42609-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3da184731fb44e76a4ad228b993df2a#info#compaction#83 average throughput is 77.47 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T04:58:05,095 DEBUG [RS:0;08a7f35e60d4:42609-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/02a77b41338344c49349860a0dcdaa54 is 1080, key is row0062/info:/1731992244598/Put/seqid=0 2024-11-19T04:58:05,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741871_1047 (size=168925) 2024-11-19T04:58:05,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741871_1047 (size=168925) 2024-11-19T04:58:05,113 DEBUG [RS:0;08a7f35e60d4:42609-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/02a77b41338344c49349860a0dcdaa54 as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/02a77b41338344c49349860a0dcdaa54 2024-11-19T04:58:05,119 INFO [RS:0;08a7f35e60d4:42609-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e3da184731fb44e76a4ad228b993df2a/info of e3da184731fb44e76a4ad228b993df2a into 02a77b41338344c49349860a0dcdaa54(size=165.0 K), total size for store is 165.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T04:58:05,119 DEBUG [RS:0;08a7f35e60d4:42609-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e3da184731fb44e76a4ad228b993df2a: 2024-11-19T04:58:05,119 INFO [RS:0;08a7f35e60d4:42609-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a., storeName=e3da184731fb44e76a4ad228b993df2a/info, priority=13, startTime=1731992285069; duration=0sec 2024-11-19T04:58:05,119 DEBUG [RS:0;08a7f35e60d4:42609-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T04:58:05,119 DEBUG [RS:0;08a7f35e60d4:42609-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3da184731fb44e76a4ad228b993df2a:info 2024-11-19T04:58:05,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:58:05,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:58:06,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:58:06,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:58:07,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42609 {}] regionserver.HRegion(8855): Flush requested on e3da184731fb44e76a4ad228b993df2a 2024-11-19T04:58:07,062 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e3da184731fb44e76a4ad228b993df2a 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-19T04:58:07,067 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/0b0bde71b26a40c8b790ec1ed101947c is 1080, key is row0213/info:/1731992285040/Put/seqid=0 2024-11-19T04:58:07,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741872_1048 (size=19013) 2024-11-19T04:58:07,074 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/0b0bde71b26a40c8b790ec1ed101947c 2024-11-19T04:58:07,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741872_1048 (size=19013) 2024-11-19T04:58:07,080 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/0b0bde71b26a40c8b790ec1ed101947c as 
hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/0b0bde71b26a40c8b790ec1ed101947c 2024-11-19T04:58:07,085 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/0b0bde71b26a40c8b790ec1ed101947c, entries=13, sequenceid=295, filesize=18.6 K 2024-11-19T04:58:07,086 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=14.71 KB/15064 for e3da184731fb44e76a4ad228b993df2a in 24ms, sequenceid=295, compaction requested=false 2024-11-19T04:58:07,086 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e3da184731fb44e76a4ad228b993df2a: 2024-11-19T04:58:07,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42609 {}] regionserver.HRegion(8855): Flush requested on e3da184731fb44e76a4ad228b993df2a 2024-11-19T04:58:07,088 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e3da184731fb44e76a4ad228b993df2a 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-19T04:58:07,093 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/a6474c8f63d247a08d79fce7a6f575ac is 1080, key is row0226/info:/1731992287063/Put/seqid=0 2024-11-19T04:58:07,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741873_1049 (size=21171) 2024-11-19T04:58:07,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741873_1049 (size=21171) 2024-11-19T04:58:07,108 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=313 (bloomFilter=true), to=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/a6474c8f63d247a08d79fce7a6f575ac 2024-11-19T04:58:07,112 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42609 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=e3da184731fb44e76a4ad228b993df2a, server=08a7f35e60d4,42609,1731992231509 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-19T04:58:07,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42609 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:40418 deadline: 1731992297111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=e3da184731fb44e76a4ad228b993df2a, server=08a7f35e60d4,42609,1731992231509 2024-11-19T04:58:07,113 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a., hostname=08a7f35e60d4,42609,1731992231509, seqNum=89 , the old value is region=TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a., hostname=08a7f35e60d4,42609,1731992231509, seqNum=89, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=e3da184731fb44e76a4ad228b993df2a, server=08a7f35e60d4,42609,1731992231509 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-19T04:58:07,113 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a., hostname=08a7f35e60d4,42609,1731992231509, seqNum=89 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=e3da184731fb44e76a4ad228b993df2a, server=08a7f35e60d4,42609,1731992231509 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-19T04:58:07,113 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a., hostname=08a7f35e60d4,42609,1731992231509, seqNum=89 because the exception is null or not the one we care about 2024-11-19T04:58:07,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/a6474c8f63d247a08d79fce7a6f575ac as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/a6474c8f63d247a08d79fce7a6f575ac 2024-11-19T04:58:07,121 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/a6474c8f63d247a08d79fce7a6f575ac, entries=15, sequenceid=313, filesize=20.7 K 2024-11-19T04:58:07,122 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=14.71 KB/15064 for e3da184731fb44e76a4ad228b993df2a in 34ms, sequenceid=313, compaction requested=true 2024-11-19T04:58:07,122 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e3da184731fb44e76a4ad228b993df2a: 2024-11-19T04:58:07,122 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e3da184731fb44e76a4ad228b993df2a:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T04:58:07,122 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T04:58:07,122 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T04:58:07,123 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 209109 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T04:58:07,123 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HStore(1541): e3da184731fb44e76a4ad228b993df2a/info is initiating minor compaction (all files) 2024-11-19T04:58:07,123 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e3da184731fb44e76a4ad228b993df2a/info in TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a. 
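The entries above show two recurring mechanisms worth illustrating: the server rejecting writes with RegionTooBusyException once a region's memstore passes its blocking limit, and the exploring compaction policy picking a set of store files to rewrite. The two Java sketches below are illustrative only; class names, helper names, and numeric values are hypothetical and are not taken from the HBase codebase or from this test run.

A minimal sketch of client-side backoff on RegionTooBusyException (the real HBase async client already retries this internally, as the AsyncRegionLocatorHelper entries above indicate; this only shows the pattern):

import java.util.concurrent.Callable;
import org.apache.hadoop.hbase.RegionTooBusyException;

public class TooBusyRetrySketch {
    // Run an action, pausing and retrying when the region reports it is over
    // its memstore limit, as in the "Over memstore limit=32.0 K" entries above.
    static <T> T withBackoff(Callable<T> action, int maxAttempts) throws Exception {
        long pauseMs = 100; // hypothetical initial pause; doubles on each retry
        for (int attempt = 1; ; attempt++) {
            try {
                return action.call();
            } catch (RegionTooBusyException e) {
                if (attempt >= maxAttempts) {
                    throw e; // give up and surface the exception
                }
                Thread.sleep(pauseMs); // give flushes/compactions time to catch up
                pauseMs = Math.min(pauseMs * 2, 10_000);
            }
        }
    }
}

A simplified sketch of the size-ratio rule behind the "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" entry: a candidate set of store files is treated as "in ratio" when no single file is larger than ratio times the combined size of the others. The actual policy also weighs file counts, off-peak hours, and forced all-file selections, which are omitted here:

import java.util.List;

public class CompactionRatioSketch {
    // True when every file is at most `ratio` times the size of the rest of
    // the selection, so no single file dominates the rewrite cost.
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = 0;
        for (long size : fileSizes) {
            total += size;
        }
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Hypothetical sizes, not the ones from this run.
        System.out.println(filesInRatio(List.of(10_000L, 12_000L, 11_000L), 1.2)); // true
        System.out.println(filesInRatio(List.of(100_000L, 5_000L, 6_000L), 1.2));  // false
    }
}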
2024-11-19T04:58:07,123 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/02a77b41338344c49349860a0dcdaa54, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/0b0bde71b26a40c8b790ec1ed101947c, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/a6474c8f63d247a08d79fce7a6f575ac] into tmpdir=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp, totalSize=204.2 K 2024-11-19T04:58:07,124 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 02a77b41338344c49349860a0dcdaa54, keycount=151, bloomtype=ROW, size=165.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1731992244598 2024-11-19T04:58:07,124 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0b0bde71b26a40c8b790ec1ed101947c, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1731992285040 2024-11-19T04:58:07,125 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] compactions.Compactor(225): Compacting a6474c8f63d247a08d79fce7a6f575ac, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1731992287063 2024-11-19T04:58:07,140 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e3da184731fb44e76a4ad228b993df2a#info#compaction#86 average throughput is 61.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T04:58:07,141 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/fac1bb3f1f8749828e0f4be6081b233c is 1080, key is row0062/info:/1731992244598/Put/seqid=0 2024-11-19T04:58:07,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741874_1050 (size=199247) 2024-11-19T04:58:07,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741874_1050 (size=199247) 2024-11-19T04:58:07,157 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/fac1bb3f1f8749828e0f4be6081b233c as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/fac1bb3f1f8749828e0f4be6081b233c 2024-11-19T04:58:07,163 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e3da184731fb44e76a4ad228b993df2a/info of e3da184731fb44e76a4ad228b993df2a into fac1bb3f1f8749828e0f4be6081b233c(size=194.6 K), total size for store is 194.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T04:58:07,163 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e3da184731fb44e76a4ad228b993df2a: 2024-11-19T04:58:07,163 INFO [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a., storeName=e3da184731fb44e76a4ad228b993df2a/info, priority=13, startTime=1731992287122; duration=0sec 2024-11-19T04:58:07,164 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T04:58:07,164 DEBUG [RS:0;08a7f35e60d4:42609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e3da184731fb44e76a4ad228b993df2a:info 2024-11-19T04:58:07,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:58:07,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:58:08,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:58:08,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:58:09,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:58:09,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:58:10,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:58:10,369 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region e3da184731fb44e76a4ad228b993df2a, had cached 0 bytes from a total of 199247 2024-11-19T04:58:10,398 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 4979a9be9a43e82c8b5cdff164028294, had cached 0 bytes from a total of 70862 2024-11-19T04:58:10,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:58:11,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:58:11,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:58:11,440 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-19T04:58:12,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:58:12,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:58:12,960 DEBUG [master/08a7f35e60d4:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-19T04:58:13,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:58:13,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:58:14,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:58:14,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:58:15,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:58:15,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:58:16,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:58:16,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:58:17,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42609 {}] regionserver.HRegion(8855): Flush requested on e3da184731fb44e76a4ad228b993df2a 2024-11-19T04:58:17,129 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e3da184731fb44e76a4ad228b993df2a 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-19T04:58:17,134 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/71c65fd3fc1844a69dee27674d236ab2 is 1080, key is row0241/info:/1731992287089/Put/seqid=0 2024-11-19T04:58:17,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741875_1051 (size=21171) 2024-11-19T04:58:17,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741875_1051 (size=21171) 2024-11-19T04:58:17,141 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/71c65fd3fc1844a69dee27674d236ab2 2024-11-19T04:58:17,147 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/71c65fd3fc1844a69dee27674d236ab2 as 
hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/71c65fd3fc1844a69dee27674d236ab2 2024-11-19T04:58:17,152 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/71c65fd3fc1844a69dee27674d236ab2, entries=15, sequenceid=332, filesize=20.7 K 2024-11-19T04:58:17,153 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=1.05 KB/1076 for e3da184731fb44e76a4ad228b993df2a in 24ms, sequenceid=332, compaction requested=false 2024-11-19T04:58:17,153 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e3da184731fb44e76a4ad228b993df2a: 2024-11-19T04:58:17,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:58:17,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:58:17,573 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=3, created chunk count=9, reused chunk count=71, reuseRatio=88.75% 2024-11-19T04:58:17,574 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-19T04:58:18,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:58:18,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:58:19,132 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-19T04:58:19,133 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C42609%2C1731992231509.1731992299132 2024-11-19T04:58:19,149 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:19,149 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:19,149 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:19,149 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:19,149 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:19,149 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/WALs/08a7f35e60d4,42609,1731992231509/08a7f35e60d4%2C42609%2C1731992231509.1731992231908 with entries=313, filesize=308.60 KB; new WAL /user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/WALs/08a7f35e60d4,42609,1731992231509/08a7f35e60d4%2C42609%2C1731992231509.1731992299132 2024-11-19T04:58:19,150 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36255:36255),(127.0.0.1/127.0.0.1:36015:36015)] 2024-11-19T04:58:19,151 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/WALs/08a7f35e60d4,42609,1731992231509/08a7f35e60d4%2C42609%2C1731992231509.1731992231908 is not closed yet, will try archiving it next time 2024-11-19T04:58:19,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741833_1009 (size=316019) 2024-11-19T04:58:19,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741833_1009 (size=316019) 2024-11-19T04:58:19,154 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-11-19T04:58:19,158 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/hbase/meta/1588230740/.tmp/info/24ac1b7099504a788bf62ad3df83b7b2 is 186, key is TestLogRolling-testLogRolling,,1731992244704.4979a9be9a43e82c8b5cdff164028294./info:regioninfo/1731992245422/Put/seqid=0 2024-11-19T04:58:19,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741877_1053 (size=6153) 2024-11-19T04:58:19,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741877_1053 (size=6153) 2024-11-19T04:58:19,163 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), 
to=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/hbase/meta/1588230740/.tmp/info/24ac1b7099504a788bf62ad3df83b7b2 2024-11-19T04:58:19,169 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/hbase/meta/1588230740/.tmp/info/24ac1b7099504a788bf62ad3df83b7b2 as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/hbase/meta/1588230740/info/24ac1b7099504a788bf62ad3df83b7b2 2024-11-19T04:58:19,173 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/hbase/meta/1588230740/info/24ac1b7099504a788bf62ad3df83b7b2, entries=5, sequenceid=21, filesize=6.0 K 2024-11-19T04:58:19,174 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 20ms, sequenceid=21, compaction requested=false 2024-11-19T04:58:19,174 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-19T04:58:19,175 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 4979a9be9a43e82c8b5cdff164028294: 2024-11-19T04:58:19,175 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing e3da184731fb44e76a4ad228b993df2a 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-19T04:58:19,186 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/e6d7be54d9404e55a6d5e65deb27ccf1 is 1080, key is row0256/info:/1731992297131/Put/seqid=0 2024-11-19T04:58:19,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741878_1054 (size=6035) 2024-11-19T04:58:19,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741878_1054 (size=6035) 2024-11-19T04:58:19,191 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/e6d7be54d9404e55a6d5e65deb27ccf1 2024-11-19T04:58:19,196 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/.tmp/info/e6d7be54d9404e55a6d5e65deb27ccf1 as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/e6d7be54d9404e55a6d5e65deb27ccf1 2024-11-19T04:58:19,201 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/e6d7be54d9404e55a6d5e65deb27ccf1, entries=1, sequenceid=336, filesize=5.9 K 2024-11-19T04:58:19,202 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize 
~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for e3da184731fb44e76a4ad228b993df2a in 27ms, sequenceid=336, compaction requested=true 2024-11-19T04:58:19,202 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for e3da184731fb44e76a4ad228b993df2a: 2024-11-19T04:58:19,202 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C42609%2C1731992231509.1731992299202 2024-11-19T04:58:19,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T04:58:19,207 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:19,207 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:19,207 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:19,207 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:19,207 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:19,207 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/WALs/08a7f35e60d4,42609,1731992231509/08a7f35e60d4%2C42609%2C1731992231509.1731992299132 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/WALs/08a7f35e60d4,42609,1731992231509/08a7f35e60d4%2C42609%2C1731992231509.1731992299202 2024-11-19T04:58:19,208 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36015:36015),(127.0.0.1/127.0.0.1:36255:36255)] 2024-11-19T04:58:19,209 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/WALs/08a7f35e60d4,42609,1731992231509/08a7f35e60d4%2C42609%2C1731992231509.1731992299132 is not closed yet, will try archiving it next time 2024-11-19T04:58:19,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741876_1052 (size=731) 2024-11-19T04:58:19,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741876_1052 (size=731) 2024-11-19T04:58:19,209 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/WALs/08a7f35e60d4,42609,1731992231509/08a7f35e60d4%2C42609%2C1731992231509.1731992231908 to hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/oldWALs/08a7f35e60d4%2C42609%2C1731992231509.1731992231908 2024-11-19T04:58:19,209 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-19T04:58:19,210 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/WALs/08a7f35e60d4,42609,1731992231509/08a7f35e60d4%2C42609%2C1731992231509.1731992299132 to hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/oldWALs/08a7f35e60d4%2C42609%2C1731992231509.1731992299132 2024-11-19T04:58:19,310 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T04:58:19,310 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-19T04:58:19,310 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T04:58:19,310 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:58:19,310 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:58:19,310 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-19T04:58:19,310 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T04:58:19,310 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=93976500, stopped=false 2024-11-19T04:58:19,311 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=08a7f35e60d4,35671,1731992231458 2024-11-19T04:58:19,312 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35671-0x1012e9608a20000, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T04:58:19,312 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42609-0x1012e9608a20001, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T04:58:19,312 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35671-0x1012e9608a20000, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:58:19,312 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T04:58:19,312 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42609-0x1012e9608a20001, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:58:19,312 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-19T04:58:19,312 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T04:58:19,312 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:58:19,313 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '08a7f35e60d4,42609,1731992231509' ***** 2024-11-19T04:58:19,313 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T04:58:19,313 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42609-0x1012e9608a20001, quorum=127.0.0.1:54523, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T04:58:19,313 INFO [RS:0;08a7f35e60d4:42609 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T04:58:19,313 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35671-0x1012e9608a20000, quorum=127.0.0.1:54523, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T04:58:19,313 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T04:58:19,313 INFO [RS:0;08a7f35e60d4:42609 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T04:58:19,314 INFO [RS:0;08a7f35e60d4:42609 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-19T04:58:19,314 INFO [RS:0;08a7f35e60d4:42609 {}] regionserver.HRegionServer(3091): Received CLOSE for 4979a9be9a43e82c8b5cdff164028294 2024-11-19T04:58:19,314 INFO [RS:0;08a7f35e60d4:42609 {}] regionserver.HRegionServer(3091): Received CLOSE for e3da184731fb44e76a4ad228b993df2a 2024-11-19T04:58:19,314 INFO [RS:0;08a7f35e60d4:42609 {}] regionserver.HRegionServer(959): stopping server 08a7f35e60d4,42609,1731992231509 2024-11-19T04:58:19,314 INFO [RS:0;08a7f35e60d4:42609 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T04:58:19,314 INFO [RS:0;08a7f35e60d4:42609 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;08a7f35e60d4:42609. 2024-11-19T04:58:19,314 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 4979a9be9a43e82c8b5cdff164028294, disabling compactions & flushes 2024-11-19T04:58:19,314 DEBUG [RS:0;08a7f35e60d4:42609 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T04:58:19,314 INFO [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731992244704.4979a9be9a43e82c8b5cdff164028294. 2024-11-19T04:58:19,314 DEBUG [RS:0;08a7f35e60d4:42609 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:58:19,314 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731992244704.4979a9be9a43e82c8b5cdff164028294. 2024-11-19T04:58:19,314 INFO [RS:0;08a7f35e60d4:42609 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T04:58:19,314 INFO [RS:0;08a7f35e60d4:42609 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T04:58:19,314 INFO [RS:0;08a7f35e60d4:42609 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-19T04:58:19,314 INFO [RS:0;08a7f35e60d4:42609 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T04:58:19,314 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731992244704.4979a9be9a43e82c8b5cdff164028294. after waiting 0 ms 2024-11-19T04:58:19,314 INFO [RS:0;08a7f35e60d4:42609 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-19T04:58:19,315 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731992244704.4979a9be9a43e82c8b5cdff164028294. 2024-11-19T04:58:19,315 DEBUG [RS:0;08a7f35e60d4:42609 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 4979a9be9a43e82c8b5cdff164028294=TestLogRolling-testLogRolling,,1731992244704.4979a9be9a43e82c8b5cdff164028294., e3da184731fb44e76a4ad228b993df2a=TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.} 2024-11-19T04:58:19,315 DEBUG [RS:0;08a7f35e60d4:42609 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 4979a9be9a43e82c8b5cdff164028294, e3da184731fb44e76a4ad228b993df2a 2024-11-19T04:58:19,315 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T04:58:19,315 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T04:58:19,315 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T04:58:19,315 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T04:58:19,315 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T04:58:19,315 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731992244704.4979a9be9a43e82c8b5cdff164028294.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/4979a9be9a43e82c8b5cdff164028294/info/cd5fb7f44eae4692af0a680415b1cc0c.561a52b828b94f12e63c17503cb73505->hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/cd5fb7f44eae4692af0a680415b1cc0c-bottom] to archive 2024-11-19T04:58:19,316 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731992244704.4979a9be9a43e82c8b5cdff164028294.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-19T04:58:19,318 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731992244704.4979a9be9a43e82c8b5cdff164028294.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/4979a9be9a43e82c8b5cdff164028294/info/cd5fb7f44eae4692af0a680415b1cc0c.561a52b828b94f12e63c17503cb73505 to hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/archive/data/default/TestLogRolling-testLogRolling/4979a9be9a43e82c8b5cdff164028294/info/cd5fb7f44eae4692af0a680415b1cc0c.561a52b828b94f12e63c17503cb73505 2024-11-19T04:58:19,318 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731992244704.4979a9be9a43e82c8b5cdff164028294.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=08a7f35e60d4:35671 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-19T04:58:19,319 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731992244704.4979a9be9a43e82c8b5cdff164028294.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-19T04:58:19,322 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-19T04:58:19,322 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T04:58:19,322 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T04:58:19,323 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731992299315Running coprocessor pre-close hooks at 1731992299315Disabling compacts and flushes for region at 1731992299315Disabling writes for close at 1731992299315Writing region close event to WAL at 1731992299316 (+1 ms)Running coprocessor post-close hooks at 1731992299322 (+6 ms)Closed at 1731992299322 2024-11-19T04:58:19,323 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-19T04:58:19,323 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/4979a9be9a43e82c8b5cdff164028294/recovered.edits/93.seqid, newMaxSeqId=93, maxSeqId=88 2024-11-19T04:58:19,324 INFO [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731992244704.4979a9be9a43e82c8b5cdff164028294. 
2024-11-19T04:58:19,324 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 4979a9be9a43e82c8b5cdff164028294: Waiting for close lock at 1731992299314Running coprocessor pre-close hooks at 1731992299314Disabling compacts and flushes for region at 1731992299314Disabling writes for close at 1731992299315 (+1 ms)Writing region close event to WAL at 1731992299319 (+4 ms)Running coprocessor post-close hooks at 1731992299324 (+5 ms)Closed at 1731992299324 2024-11-19T04:58:19,324 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731992244704.4979a9be9a43e82c8b5cdff164028294. 2024-11-19T04:58:19,324 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing e3da184731fb44e76a4ad228b993df2a, disabling compactions & flushes 2024-11-19T04:58:19,324 INFO [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a. 2024-11-19T04:58:19,324 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a. 2024-11-19T04:58:19,324 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a. after waiting 0 ms 2024-11-19T04:58:19,324 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a. 
2024-11-19T04:58:19,325 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/cd5fb7f44eae4692af0a680415b1cc0c.561a52b828b94f12e63c17503cb73505->hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/561a52b828b94f12e63c17503cb73505/info/cd5fb7f44eae4692af0a680415b1cc0c-top, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/95daf33c87514c6eb4d8f5effa59c31d, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/TestLogRolling-testLogRolling=561a52b828b94f12e63c17503cb73505-95857016e643434c840cfd7d2d99d28a, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/9d972cef1772428a91f5157e51b15e8e, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/7901d326b9ff4e169f263abedfdcdaba, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/66e76b7f2adb4dd5b49641cb516405e7, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/5db4ff5f649d49069bae012b6ba78cc1, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/f21f2cf5f5b44ab78ce5b84ae1919c73, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/bda441dd6bfa4ddea87aa505129ea67a, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/d28e764bbd7c4819b2941d7679e309a4, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/fe9a3b7228fa426094e5b428c041c9d9, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/5b9b688b59524aafb63454b8fa9013ad, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/5fb3220cb382473faddc14cd8ba4eb7a, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/1f17e3dedb3c485ba6d7da4892e1291f, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/4e8b734eac174d4d9d483499ade2ee6b, 
hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/93697579a27d4c0bba0d6c13bd928d87, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/fabe563a9ff14ae4b23475e5d7bece6e, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/371f74827edd4060b4767677b9aa5d62, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/75d1c392fab14f45addf1abc34d55c35, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/02a77b41338344c49349860a0dcdaa54, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/cda1d805def64391b5b017cc5cc99bfb, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/0b0bde71b26a40c8b790ec1ed101947c, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/a6474c8f63d247a08d79fce7a6f575ac] to archive 2024-11-19T04:58:19,326 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-19T04:58:19,328 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/cd5fb7f44eae4692af0a680415b1cc0c.561a52b828b94f12e63c17503cb73505 to hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/archive/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/cd5fb7f44eae4692af0a680415b1cc0c.561a52b828b94f12e63c17503cb73505 2024-11-19T04:58:19,329 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/95daf33c87514c6eb4d8f5effa59c31d to hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/archive/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/95daf33c87514c6eb4d8f5effa59c31d 2024-11-19T04:58:19,330 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/TestLogRolling-testLogRolling=561a52b828b94f12e63c17503cb73505-95857016e643434c840cfd7d2d99d28a to hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/archive/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/TestLogRolling-testLogRolling=561a52b828b94f12e63c17503cb73505-95857016e643434c840cfd7d2d99d28a 2024-11-19T04:58:19,332 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/9d972cef1772428a91f5157e51b15e8e to hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/archive/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/9d972cef1772428a91f5157e51b15e8e 2024-11-19T04:58:19,333 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/7901d326b9ff4e169f263abedfdcdaba to hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/archive/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/7901d326b9ff4e169f263abedfdcdaba 2024-11-19T04:58:19,335 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/66e76b7f2adb4dd5b49641cb516405e7 to hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/archive/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/66e76b7f2adb4dd5b49641cb516405e7 2024-11-19T04:58:19,336 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/5db4ff5f649d49069bae012b6ba78cc1 to hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/archive/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/5db4ff5f649d49069bae012b6ba78cc1 2024-11-19T04:58:19,337 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/f21f2cf5f5b44ab78ce5b84ae1919c73 to hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/archive/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/f21f2cf5f5b44ab78ce5b84ae1919c73 2024-11-19T04:58:19,339 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/bda441dd6bfa4ddea87aa505129ea67a to hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/archive/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/bda441dd6bfa4ddea87aa505129ea67a 2024-11-19T04:58:19,340 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/d28e764bbd7c4819b2941d7679e309a4 to hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/archive/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/d28e764bbd7c4819b2941d7679e309a4 2024-11-19T04:58:19,341 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/fe9a3b7228fa426094e5b428c041c9d9 to hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/archive/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/fe9a3b7228fa426094e5b428c041c9d9 2024-11-19T04:58:19,342 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/5b9b688b59524aafb63454b8fa9013ad to hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/archive/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/5b9b688b59524aafb63454b8fa9013ad 2024-11-19T04:58:19,343 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/5fb3220cb382473faddc14cd8ba4eb7a to hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/archive/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/5fb3220cb382473faddc14cd8ba4eb7a 2024-11-19T04:58:19,345 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/1f17e3dedb3c485ba6d7da4892e1291f to hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/archive/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/1f17e3dedb3c485ba6d7da4892e1291f 2024-11-19T04:58:19,346 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/4e8b734eac174d4d9d483499ade2ee6b to hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/archive/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/4e8b734eac174d4d9d483499ade2ee6b 2024-11-19T04:58:19,347 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/93697579a27d4c0bba0d6c13bd928d87 to hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/archive/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/93697579a27d4c0bba0d6c13bd928d87 2024-11-19T04:58:19,348 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/fabe563a9ff14ae4b23475e5d7bece6e to hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/archive/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/fabe563a9ff14ae4b23475e5d7bece6e 2024-11-19T04:58:19,349 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/371f74827edd4060b4767677b9aa5d62 to hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/archive/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/371f74827edd4060b4767677b9aa5d62 2024-11-19T04:58:19,351 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/75d1c392fab14f45addf1abc34d55c35 to hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/archive/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/75d1c392fab14f45addf1abc34d55c35 2024-11-19T04:58:19,352 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/02a77b41338344c49349860a0dcdaa54 to hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/archive/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/02a77b41338344c49349860a0dcdaa54 2024-11-19T04:58:19,353 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/cda1d805def64391b5b017cc5cc99bfb to hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/archive/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/cda1d805def64391b5b017cc5cc99bfb 2024-11-19T04:58:19,354 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/0b0bde71b26a40c8b790ec1ed101947c to hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/archive/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/0b0bde71b26a40c8b790ec1ed101947c 2024-11-19T04:58:19,355 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/a6474c8f63d247a08d79fce7a6f575ac to 
hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/archive/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/info/a6474c8f63d247a08d79fce7a6f575ac
2024-11-19T04:58:19,356 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [95daf33c87514c6eb4d8f5effa59c31d=8359, 9d972cef1772428a91f5157e51b15e8e=12509, 7901d326b9ff4e169f263abedfdcdaba=32183, 66e76b7f2adb4dd5b49641cb516405e7=21141, 5db4ff5f649d49069bae012b6ba78cc1=16819, f21f2cf5f5b44ab78ce5b84ae1919c73=59266, bda441dd6bfa4ddea87aa505129ea67a=20078, d28e764bbd7c4819b2941d7679e309a4=19000, fe9a3b7228fa426094e5b428c041c9d9=81065, 5b9b688b59524aafb63454b8fa9013ad=12516, 5fb3220cb382473faddc14cd8ba4eb7a=21156, 1f17e3dedb3c485ba6d7da4892e1291f=113515, 4e8b734eac174d4d9d483499ade2ee6b=21156, 93697579a27d4c0bba0d6c13bd928d87=21156, fabe563a9ff14ae4b23475e5d7bece6e=146162, 371f74827edd4060b4767677b9aa5d62=21156, 75d1c392fab14f45addf1abc34d55c35=12521, 02a77b41338344c49349860a0dcdaa54=168925, cda1d805def64391b5b017cc5cc99bfb=20092, 0b0bde71b26a40c8b790ec1ed101947c=19013, a6474c8f63d247a08d79fce7a6f575ac=21171]
2024-11-19T04:58:19,359 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/data/default/TestLogRolling-testLogRolling/e3da184731fb44e76a4ad228b993df2a/recovered.edits/339.seqid, newMaxSeqId=339, maxSeqId=88
2024-11-19T04:58:19,360 INFO [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.
2024-11-19T04:58:19,360 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for e3da184731fb44e76a4ad228b993df2a: Waiting for close lock at 1731992299324Running coprocessor pre-close hooks at 1731992299324Disabling compacts and flushes for region at 1731992299324Disabling writes for close at 1731992299324Writing region close event to WAL at 1731992299356 (+32 ms)Running coprocessor post-close hooks at 1731992299360 (+4 ms)Closed at 1731992299360
2024-11-19T04:58:19,360 DEBUG [RS_CLOSE_REGION-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731992244704.e3da184731fb44e76a4ad228b993df2a.
2024-11-19T04:58:19,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-19T04:58:19,515 INFO [RS:0;08a7f35e60d4:42609 {}] regionserver.HRegionServer(976): stopping server 08a7f35e60d4,42609,1731992231509; all regions closed.
2024-11-19T04:58:19,515 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T04:58:19,515 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T04:58:19,516 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T04:58:19,516 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T04:58:19,516 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T04:58:19,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741834_1010 (size=8107)
2024-11-19T04:58:19,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741834_1010 (size=8107)
2024-11-19T04:58:19,522 DEBUG [RS:0;08a7f35e60d4:42609 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/oldWALs
2024-11-19T04:58:19,522 INFO [RS:0;08a7f35e60d4:42609 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 08a7f35e60d4%2C42609%2C1731992231509.meta:.meta(num 1731992232331)
2024-11-19T04:58:19,522 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T04:58:19,522 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T04:58:19,522 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T04:58:19,522 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T04:58:19,522 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T04:58:19,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741879_1055 (size=778)
2024-11-19T04:58:19,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741879_1055 (size=778)
2024-11-19T04:58:19,527 DEBUG [RS:0;08a7f35e60d4:42609 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/oldWALs
2024-11-19T04:58:19,527 INFO [RS:0;08a7f35e60d4:42609 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 08a7f35e60d4%2C42609%2C1731992231509:(num 1731992299202)
2024-11-19T04:58:19,527 DEBUG [RS:0;08a7f35e60d4:42609 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-19T04:58:19,527 INFO [RS:0;08a7f35e60d4:42609 {}] regionserver.LeaseManager(133): Closed leases
2024-11-19T04:58:19,527 INFO [RS:0;08a7f35e60d4:42609 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-19T04:58:19,527 INFO [RS:0;08a7f35e60d4:42609 {}] hbase.ChoreService(370): Chore service for: regionserver/08a7f35e60d4:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-11-19T04:58:19,527 INFO [RS:0;08a7f35e60d4:42609 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-19T04:58:19,527 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-19T04:58:19,527 INFO [RS:0;08a7f35e60d4:42609 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42609
2024-11-19T04:58:19,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42609-0x1012e9608a20001, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/08a7f35e60d4,42609,1731992231509
2024-11-19T04:58:19,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35671-0x1012e9608a20000, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-19T04:58:19,529 INFO [RS:0;08a7f35e60d4:42609 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-19T04:58:19,531 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [08a7f35e60d4,42609,1731992231509]
2024-11-19T04:58:19,533 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/08a7f35e60d4,42609,1731992231509 already deleted, retry=false
2024-11-19T04:58:19,533 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 08a7f35e60d4,42609,1731992231509 expired; onlineServers=0
2024-11-19T04:58:19,533 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '08a7f35e60d4,35671,1731992231458' *****
2024-11-19T04:58:19,533 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-11-19T04:58:19,533 INFO [M:0;08a7f35e60d4:35671 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-19T04:58:19,534 INFO [M:0;08a7f35e60d4:35671 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-19T04:58:19,534 DEBUG [M:0;08a7f35e60d4:35671 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-11-19T04:58:19,534 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-11-19T04:58:19,534 DEBUG [M:0;08a7f35e60d4:35671 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-19T04:58:19,534 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.large.0-1731992231704 {}] cleaner.HFileCleaner(306): Exit Thread[master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.large.0-1731992231704,5,FailOnTimeoutGroup]
2024-11-19T04:58:19,534 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.small.0-1731992231704 {}] cleaner.HFileCleaner(306): Exit Thread[master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.small.0-1731992231704,5,FailOnTimeoutGroup]
2024-11-19T04:58:19,534 INFO [M:0;08a7f35e60d4:35671 {}] hbase.ChoreService(370): Chore service for: master/08a7f35e60d4:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-11-19T04:58:19,534 INFO [M:0;08a7f35e60d4:35671 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-19T04:58:19,534 DEBUG [M:0;08a7f35e60d4:35671 {}] master.HMaster(1795): Stopping service threads
2024-11-19T04:58:19,534 INFO [M:0;08a7f35e60d4:35671 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-11-19T04:58:19,534 INFO [M:0;08a7f35e60d4:35671 {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-19T04:58:19,534 INFO [M:0;08a7f35e60d4:35671 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-11-19T04:58:19,534 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-11-19T04:58:19,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35671-0x1012e9608a20000, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-19T04:58:19,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35671-0x1012e9608a20000, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-19T04:58:19,536 DEBUG [M:0;08a7f35e60d4:35671 {}] zookeeper.ZKUtil(347): master:35671-0x1012e9608a20000, quorum=127.0.0.1:54523, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-11-19T04:58:19,536 WARN [M:0;08a7f35e60d4:35671 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-11-19T04:58:19,537 INFO [M:0;08a7f35e60d4:35671 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/.lastflushedseqids
2024-11-19T04:58:19,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741880_1056 (size=228)
2024-11-19T04:58:19,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741880_1056 (size=228)
2024-11-19T04:58:19,543 INFO [M:0;08a7f35e60d4:35671 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-11-19T04:58:19,543 INFO [M:0;08a7f35e60d4:35671 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-11-19T04:58:19,544 DEBUG [M:0;08a7f35e60d4:35671 {}] regionserver.HRegion(1722): Closing
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T04:58:19,544 INFO [M:0;08a7f35e60d4:35671 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:58:19,544 DEBUG [M:0;08a7f35e60d4:35671 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:58:19,544 DEBUG [M:0;08a7f35e60d4:35671 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T04:58:19,544 DEBUG [M:0;08a7f35e60d4:35671 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:58:19,544 INFO [M:0;08a7f35e60d4:35671 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.43 KB heapSize=63.36 KB 2024-11-19T04:58:19,567 DEBUG [M:0;08a7f35e60d4:35671 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8acb36e32efb43288892fd29b8398b3f is 82, key is hbase:meta,,1/info:regioninfo/1731992232355/Put/seqid=0 2024-11-19T04:58:19,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741881_1057 (size=5672) 2024-11-19T04:58:19,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741881_1057 (size=5672) 2024-11-19T04:58:19,573 INFO [M:0;08a7f35e60d4:35671 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8acb36e32efb43288892fd29b8398b3f 2024-11-19T04:58:19,604 DEBUG [M:0;08a7f35e60d4:35671 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/926dc9cca9d94a7ca8a5899438533c7e is 751, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731992232808/Put/seqid=0 2024-11-19T04:58:19,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741882_1058 (size=7091) 2024-11-19T04:58:19,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741882_1058 (size=7091) 2024-11-19T04:58:19,609 INFO [M:0;08a7f35e60d4:35671 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.83 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/926dc9cca9d94a7ca8a5899438533c7e 2024-11-19T04:58:19,614 INFO [M:0;08a7f35e60d4:35671 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 926dc9cca9d94a7ca8a5899438533c7e 2024-11-19T04:58:19,630 DEBUG [M:0;08a7f35e60d4:35671 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/237d75bebf0644b9961a8a196712eeb6 is 69, key is 08a7f35e60d4,42609,1731992231509/rs:state/1731992231750/Put/seqid=0 2024-11-19T04:58:19,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42609-0x1012e9608a20001, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T04:58:19,631 INFO [RS:0;08a7f35e60d4:42609 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T04:58:19,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42609-0x1012e9608a20001, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T04:58:19,631 INFO [RS:0;08a7f35e60d4:42609 {}] regionserver.HRegionServer(1031): Exiting; stopping=08a7f35e60d4,42609,1731992231509; zookeeper connection closed. 2024-11-19T04:58:19,631 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@31fd5298 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@31fd5298 2024-11-19T04:58:19,631 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-19T04:58:19,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741883_1059 (size=5156) 2024-11-19T04:58:19,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741883_1059 (size=5156) 2024-11-19T04:58:19,639 INFO [M:0;08a7f35e60d4:35671 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/237d75bebf0644b9961a8a196712eeb6 2024-11-19T04:58:19,659 DEBUG [M:0;08a7f35e60d4:35671 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/896a85af57c84fda91e08756d55e95fd is 52, key is load_balancer_on/state:d/1731992232434/Put/seqid=0 2024-11-19T04:58:19,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741884_1060 (size=5056) 2024-11-19T04:58:19,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741884_1060 (size=5056) 2024-11-19T04:58:19,666 INFO [M:0;08a7f35e60d4:35671 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/896a85af57c84fda91e08756d55e95fd 2024-11-19T04:58:19,671 DEBUG [M:0;08a7f35e60d4:35671 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8acb36e32efb43288892fd29b8398b3f as 
hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8acb36e32efb43288892fd29b8398b3f 2024-11-19T04:58:19,676 INFO [M:0;08a7f35e60d4:35671 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8acb36e32efb43288892fd29b8398b3f, entries=8, sequenceid=125, filesize=5.5 K 2024-11-19T04:58:19,677 DEBUG [M:0;08a7f35e60d4:35671 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/926dc9cca9d94a7ca8a5899438533c7e as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/926dc9cca9d94a7ca8a5899438533c7e 2024-11-19T04:58:19,681 INFO [M:0;08a7f35e60d4:35671 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 926dc9cca9d94a7ca8a5899438533c7e 2024-11-19T04:58:19,682 INFO [M:0;08a7f35e60d4:35671 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/926dc9cca9d94a7ca8a5899438533c7e, entries=13, sequenceid=125, filesize=6.9 K 2024-11-19T04:58:19,682 DEBUG [M:0;08a7f35e60d4:35671 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/237d75bebf0644b9961a8a196712eeb6 as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/237d75bebf0644b9961a8a196712eeb6 2024-11-19T04:58:19,686 INFO [M:0;08a7f35e60d4:35671 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/237d75bebf0644b9961a8a196712eeb6, entries=1, sequenceid=125, filesize=5.0 K 2024-11-19T04:58:19,687 DEBUG [M:0;08a7f35e60d4:35671 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/896a85af57c84fda91e08756d55e95fd as hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/896a85af57c84fda91e08756d55e95fd 2024-11-19T04:58:19,692 INFO [M:0;08a7f35e60d4:35671 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40207/user/jenkins/test-data/18b7c5cf-c14e-36e2-7586-77aaa7f06f9d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/896a85af57c84fda91e08756d55e95fd, entries=1, sequenceid=125, filesize=4.9 K 2024-11-19T04:58:19,693 INFO [M:0;08a7f35e60d4:35671 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 149ms, sequenceid=125, compaction requested=false 2024-11-19T04:58:19,694 INFO [M:0;08a7f35e60d4:35671 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T04:58:19,694 DEBUG [M:0;08a7f35e60d4:35671 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731992299544Disabling compacts and flushes for region at 1731992299544Disabling writes for close at 1731992299544Obtaining lock to block concurrent updates at 1731992299544Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731992299544Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52663, getHeapSize=64816, getOffHeapSize=0, getCellsCount=148 at 1731992299544Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731992299545 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731992299545Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731992299566 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731992299566Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731992299586 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731992299603 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731992299603Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731992299614 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731992299630 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731992299630Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731992299643 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731992299659 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731992299659Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@e5abfc8: reopening flushed file at 1731992299671 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4ad9ef95: reopening flushed file at 1731992299677 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@41ce7631: reopening flushed file at 1731992299682 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@36f09b49: reopening flushed file at 1731992299686 (+4 ms)Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 149ms, sequenceid=125, compaction requested=false at 1731992299693 (+7 ms)Writing region close event to WAL at 1731992299694 (+1 ms)Closed at 1731992299694 2024-11-19T04:58:19,695 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:19,695 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:19,695 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:19,695 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:19,695 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:19,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39975 is added to blk_1073741830_1006 (size=61332) 2024-11-19T04:58:19,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36847 is added to blk_1073741830_1006 (size=61332) 2024-11-19T04:58:19,698 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-19T04:58:19,698 INFO [M:0;08a7f35e60d4:35671 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-19T04:58:19,698 INFO [M:0;08a7f35e60d4:35671 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35671 2024-11-19T04:58:19,698 INFO [M:0;08a7f35e60d4:35671 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T04:58:19,775 INFO [regionserver/08a7f35e60d4:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T04:58:19,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35671-0x1012e9608a20000, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T04:58:19,800 INFO [M:0;08a7f35e60d4:35671 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T04:58:19,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35671-0x1012e9608a20000, quorum=127.0.0.1:54523, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T04:58:19,803 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@652d6e37{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:58:19,804 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@54fcac{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T04:58:19,804 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T04:58:19,804 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@53298b3d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T04:58:19,804 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5096343{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/hadoop.log.dir/,STOPPED} 2024-11-19T04:58:19,806 WARN [BP-486461787-172.17.0.2-1731992230528 heartbeating to localhost/127.0.0.1:40207 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T04:58:19,806 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T04:58:19,806 WARN [BP-486461787-172.17.0.2-1731992230528 heartbeating to localhost/127.0.0.1:40207 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-486461787-172.17.0.2-1731992230528 (Datanode Uuid c7b5a726-e8a2-4f29-8687-82ac74f60bcd) service to localhost/127.0.0.1:40207 2024-11-19T04:58:19,806 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T04:58:19,807 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/cluster_de53dc38-4034-b207-c060-c6ceccacd599/data/data3/current/BP-486461787-172.17.0.2-1731992230528 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:58:19,807 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/cluster_de53dc38-4034-b207-c060-c6ceccacd599/data/data4/current/BP-486461787-172.17.0.2-1731992230528 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:58:19,807 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T04:58:19,809 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@79b422de{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:58:19,810 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@73447fd2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T04:58:19,810 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T04:58:19,810 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fc50460{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T04:58:19,810 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@66ce6cc0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/hadoop.log.dir/,STOPPED} 2024-11-19T04:58:19,811 WARN [BP-486461787-172.17.0.2-1731992230528 heartbeating to localhost/127.0.0.1:40207 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T04:58:19,811 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T04:58:19,811 WARN [BP-486461787-172.17.0.2-1731992230528 heartbeating to localhost/127.0.0.1:40207 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-486461787-172.17.0.2-1731992230528 (Datanode Uuid 2e516bfa-2197-4fba-84b8-20b57429a12f) service to localhost/127.0.0.1:40207
2024-11-19T04:58:19,811 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-19T04:58:19,812 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/cluster_de53dc38-4034-b207-c060-c6ceccacd599/data/data1/current/BP-486461787-172.17.0.2-1731992230528 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-19T04:58:19,812 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/cluster_de53dc38-4034-b207-c060-c6ceccacd599/data/data2/current/BP-486461787-172.17.0.2-1731992230528 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-19T04:58:19,812 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-19T04:58:19,820 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@62c26df6{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-19T04:58:19,821 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@8987cea{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-19T04:58:19,821 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-19T04:58:19,821 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59703725{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-19T04:58:19,821 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4cf57465{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/hadoop.log.dir/,STOPPED}
2024-11-19T04:58:19,829 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-19T04:58:19,860 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-19T04:58:19,870 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=229 (was 209)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:40207
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-41-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-38-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-41-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:40207
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-39-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-14-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40207 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-39-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40207
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40207 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-38-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-40-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-38-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40207
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-14-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-39-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-41-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: region-location-1
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40207 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40207 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=503 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=208 (was 190) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=11993 (was 11276) - AvailableMemoryMB LEAK? 
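The resource-checker report above ("Thread=229 (was 209) ... Thread LEAK?", followed by one "Potentially hanging thread" stack per new thread) is produced by comparing a snapshot of live threads taken before the test with one taken after it. A minimal sketch of that before/after idea using only JDK APIs follows; the class and method names below are illustrative placeholders, not HBase's actual ResourceChecker API.

import java.util.HashSet;
import java.util.Set;

// Sketch only: snapshot live threads before a test, diff after it, and print any
// new threads with their stacks, similar in spirit to the report above.
public final class ThreadLeakCheck {
  public static Set<Thread> snapshot() {
    return new HashSet<>(Thread.getAllStackTraces().keySet());
  }

  public static void report(Set<Thread> before) {
    Set<Thread> now = snapshot();
    System.out.printf("Thread=%d (was %d)%n", now.size(), before.size());
    now.removeAll(before); // threads that appeared during the test
    for (Thread t : now) {
      System.out.println("Potentially hanging thread: " + t.getName());
      for (StackTraceElement frame : t.getStackTrace()) {
        System.out.println("    " + frame);
      }
    }
  }
}

A harness would call snapshot() in a before-hook, keep the result, and pass it to report() in the after-hook; a non-empty diff does not prove a leak (threads may still be shutting down), which is why the report above only says "Thread LEAK?".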
- 2024-11-19T04:58:19,884 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=229, OpenFileDescriptor=503, MaxFileDescriptor=1048576, SystemLoadAverage=208, ProcessCount=12, AvailableMemoryMB=11992 2024-11-19T04:58:19,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T04:58:19,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/hadoop.log.dir so I do NOT create it in target/test-data/7e687143-3ee9-5bc4-c204-693489772b88 2024-11-19T04:58:19,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2dfccc8f-5714-2982-9e21-6ddb5331155c/hadoop.tmp.dir so I do NOT create it in target/test-data/7e687143-3ee9-5bc4-c204-693489772b88 2024-11-19T04:58:19,885 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/cluster_a163db0d-7c5f-cc41-7223-e5b47e82f60c, deleteOnExit=true 2024-11-19T04:58:19,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T04:58:19,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/test.cache.data in system properties and HBase conf 2024-11-19T04:58:19,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T04:58:19,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/hadoop.log.dir in system properties and HBase conf 2024-11-19T04:58:19,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T04:58:19,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T04:58:19,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T04:58:19,885 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-19T04:58:19,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T04:58:19,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T04:58:19,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T04:58:19,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T04:58:19,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T04:58:19,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T04:58:19,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T04:58:19,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T04:58:19,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T04:58:19,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/nfs.dump.dir in system properties and HBase conf 2024-11-19T04:58:19,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/java.io.tmpdir in system properties and HBase conf 2024-11-19T04:58:19,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T04:58:19,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T04:58:19,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T04:58:19,900 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T04:58:19,970 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:58:19,974 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T04:58:19,976 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T04:58:19,976 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T04:58:19,976 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T04:58:19,977 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T04:58:19,980 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c1884c9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/hadoop.log.dir/,AVAILABLE} 2024-11-19T04:58:19,981 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d7a0e2e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T04:58:20,104 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7fa6f74d{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/java.io.tmpdir/jetty-localhost-44033-hadoop-hdfs-3_4_1-tests_jar-_-any-7975341843896350537/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T04:58:20,105 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@93c51d{HTTP/1.1, (http/1.1)}{localhost:44033} 2024-11-19T04:58:20,105 INFO [Time-limited test {}] server.Server(415): Started @308150ms 2024-11-19T04:58:20,119 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T04:58:20,178 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-19T04:58:20,180 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-19T04:58:20,181 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-19T04:58:20,181 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-19T04:58:20,182 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-19T04:58:20,182 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f387f47{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/hadoop.log.dir/,AVAILABLE}
2024-11-19T04:58:20,182 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24bb5ef7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-19T04:58:20,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-19T04:58:20,307 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@78047c32{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/java.io.tmpdir/jetty-localhost-46385-hadoop-hdfs-3_4_1-tests_jar-_-any-15249238272888222338/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-19T04:58:20,307 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1d359c98{HTTP/1.1, (http/1.1)}{localhost:46385}
2024-11-19T04:58:20,307 INFO [Time-limited test {}] server.Server(415): Started @308352ms
2024-11-19T04:58:20,309 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-19T04:58:20,351 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-19T04:58:20,354 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-19T04:58:20,355 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-19T04:58:20,355 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-19T04:58:20,355 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-19T04:58:20,355 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@223a801d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/hadoop.log.dir/,AVAILABLE}
2024-11-19T04:58:20,356 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@238bf9b3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-19T04:58:20,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-19T04:58:20,420 WARN [Thread-2471 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/cluster_a163db0d-7c5f-cc41-7223-e5b47e82f60c/data/data1/current/BP-1388505216-172.17.0.2-1731992299906/current, will proceed with Du for space computation calculation,
2024-11-19T04:58:20,420 WARN [Thread-2472 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/cluster_a163db0d-7c5f-cc41-7223-e5b47e82f60c/data/data2/current/BP-1388505216-172.17.0.2-1731992299906/current, will proceed with Du for space computation calculation,
2024-11-19T04:58:20,446 WARN [Thread-2450 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec.
Assuming default value of -1 2024-11-19T04:58:20,448 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2d176a0e288289ee with lease ID 0xc659ea47fab86da1: Processing first storage report for DS-fae49b43-841b-4a50-b4b2-dba11181ee8e from datanode DatanodeRegistration(127.0.0.1:40691, datanodeUuid=7d07476d-291a-4ffd-8156-d95e99c109c5, infoPort=38425, infoSecurePort=0, ipcPort=35643, storageInfo=lv=-57;cid=testClusterID;nsid=1589838219;c=1731992299906) 2024-11-19T04:58:20,448 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2d176a0e288289ee with lease ID 0xc659ea47fab86da1: from storage DS-fae49b43-841b-4a50-b4b2-dba11181ee8e node DatanodeRegistration(127.0.0.1:40691, datanodeUuid=7d07476d-291a-4ffd-8156-d95e99c109c5, infoPort=38425, infoSecurePort=0, ipcPort=35643, storageInfo=lv=-57;cid=testClusterID;nsid=1589838219;c=1731992299906), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:58:20,448 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2d176a0e288289ee with lease ID 0xc659ea47fab86da1: Processing first storage report for DS-56d3dc79-9bbf-44a7-9779-443e5b3dfab6 from datanode DatanodeRegistration(127.0.0.1:40691, datanodeUuid=7d07476d-291a-4ffd-8156-d95e99c109c5, infoPort=38425, infoSecurePort=0, ipcPort=35643, storageInfo=lv=-57;cid=testClusterID;nsid=1589838219;c=1731992299906) 2024-11-19T04:58:20,449 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2d176a0e288289ee with lease ID 0xc659ea47fab86da1: from storage DS-56d3dc79-9bbf-44a7-9779-443e5b3dfab6 node DatanodeRegistration(127.0.0.1:40691, datanodeUuid=7d07476d-291a-4ffd-8156-d95e99c109c5, infoPort=38425, infoSecurePort=0, ipcPort=35643, storageInfo=lv=-57;cid=testClusterID;nsid=1589838219;c=1731992299906), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:58:20,486 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4c4c959a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/java.io.tmpdir/jetty-localhost-44201-hadoop-hdfs-3_4_1-tests_jar-_-any-11298281108699744084/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:58:20,486 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4edfb46d{HTTP/1.1, (http/1.1)}{localhost:44201} 2024-11-19T04:58:20,486 INFO [Time-limited test {}] server.Server(415): Started @308531ms 2024-11-19T04:58:20,487 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
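The DataNode web servers, storage registrations and block reports above, together with the earlier "Starting up minicluster with option: StartMiniClusterOption{numMasters=1, ... numDataNodes=2, ...}" and later "Minicluster is down" records, come from HBaseTestingUtil bringing an in-process HDFS + ZooKeeper + HBase cluster up and down around each test. A hedged sketch of that lifecycle follows; the class and method names are the ones the log itself references, but the import paths and the exact builder methods are written from memory and should be treated as assumptions.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

// Sketch only: start a minicluster shaped like the one whose startup records
// appear above (1 master, 1 region server, 2 datanodes), then tear it down.
public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .build();
    util.startMiniCluster(option);   // produces DFS/ZK/HMaster startup records like those above
    try {
      // ... test body would run against util.getConnection() here ...
    } finally {
      util.shutdownMiniCluster();    // logged as "Minicluster is down"
    }
  }
}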
2024-11-19T04:58:20,584 WARN [Thread-2497 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/cluster_a163db0d-7c5f-cc41-7223-e5b47e82f60c/data/data3/current/BP-1388505216-172.17.0.2-1731992299906/current, will proceed with Du for space computation calculation, 2024-11-19T04:58:20,584 WARN [Thread-2498 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/cluster_a163db0d-7c5f-cc41-7223-e5b47e82f60c/data/data4/current/BP-1388505216-172.17.0.2-1731992299906/current, will proceed with Du for space computation calculation, 2024-11-19T04:58:20,603 WARN [Thread-2486 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T04:58:20,606 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfc54b7fe0505e531 with lease ID 0xc659ea47fab86da2: Processing first storage report for DS-e3b7aa90-9fa2-4184-88de-769af660e114 from datanode DatanodeRegistration(127.0.0.1:40183, datanodeUuid=67afe9ee-a0fb-4af0-9833-d2760ba84e1f, infoPort=45599, infoSecurePort=0, ipcPort=42365, storageInfo=lv=-57;cid=testClusterID;nsid=1589838219;c=1731992299906) 2024-11-19T04:58:20,606 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfc54b7fe0505e531 with lease ID 0xc659ea47fab86da2: from storage DS-e3b7aa90-9fa2-4184-88de-769af660e114 node DatanodeRegistration(127.0.0.1:40183, datanodeUuid=67afe9ee-a0fb-4af0-9833-d2760ba84e1f, infoPort=45599, infoSecurePort=0, ipcPort=42365, storageInfo=lv=-57;cid=testClusterID;nsid=1589838219;c=1731992299906), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-19T04:58:20,606 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfc54b7fe0505e531 with lease ID 0xc659ea47fab86da2: Processing first storage report for DS-ea328ca5-85f5-4425-bfca-08aacbd1cbde from datanode DatanodeRegistration(127.0.0.1:40183, datanodeUuid=67afe9ee-a0fb-4af0-9833-d2760ba84e1f, infoPort=45599, infoSecurePort=0, ipcPort=42365, storageInfo=lv=-57;cid=testClusterID;nsid=1589838219;c=1731992299906) 2024-11-19T04:58:20,606 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfc54b7fe0505e531 with lease ID 0xc659ea47fab86da2: from storage DS-ea328ca5-85f5-4425-bfca-08aacbd1cbde node DatanodeRegistration(127.0.0.1:40183, datanodeUuid=67afe9ee-a0fb-4af0-9833-d2760ba84e1f, infoPort=45599, infoSecurePort=0, ipcPort=42365, storageInfo=lv=-57;cid=testClusterID;nsid=1589838219;c=1731992299906), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T04:58:20,613 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88 2024-11-19T04:58:20,619 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/cluster_a163db0d-7c5f-cc41-7223-e5b47e82f60c/zookeeper_0, clientPort=61050, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/cluster_a163db0d-7c5f-cc41-7223-e5b47e82f60c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/cluster_a163db0d-7c5f-cc41-7223-e5b47e82f60c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T04:58:20,620 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=61050 2024-11-19T04:58:20,620 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:58:20,622 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:58:20,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741825_1001 (size=7) 2024-11-19T04:58:20,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40691 is added to blk_1073741825_1001 (size=7) 2024-11-19T04:58:20,632 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754 with version=8 2024-11-19T04:58:20,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35757/user/jenkins/test-data/0edc5c7b-6a25-7082-a9b3-09de3862a1b7/hbase-staging 2024-11-19T04:58:20,634 INFO [Time-limited test {}] client.ConnectionUtils(128): master/08a7f35e60d4:0 server-side Connection retries=45 2024-11-19T04:58:20,634 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T04:58:20,634 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T04:58:20,634 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T04:58:20,634 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T04:58:20,634 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T04:58:20,634 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T04:58:20,634 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T04:58:20,635 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42741 2024-11-19T04:58:20,636 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42741 connecting to ZooKeeper ensemble=127.0.0.1:61050 2024-11-19T04:58:20,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:427410x0, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T04:58:20,644 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42741-0x1012e9716d90000 connected 2024-11-19T04:58:20,658 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:58:20,660 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:58:20,662 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42741-0x1012e9716d90000, quorum=127.0.0.1:61050, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T04:58:20,662 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754, hbase.cluster.distributed=false 2024-11-19T04:58:20,664 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42741-0x1012e9716d90000, quorum=127.0.0.1:61050, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T04:58:20,664 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42741 2024-11-19T04:58:20,664 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42741 2024-11-19T04:58:20,665 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42741 2024-11-19T04:58:20,666 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42741 2024-11-19T04:58:20,666 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42741 2024-11-19T04:58:20,685 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/08a7f35e60d4:0 server-side Connection retries=45 2024-11-19T04:58:20,685 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T04:58:20,685 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T04:58:20,685 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T04:58:20,685 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T04:58:20,685 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T04:58:20,685 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T04:58:20,685 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T04:58:20,686 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43765 2024-11-19T04:58:20,686 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43765 connecting to ZooKeeper ensemble=127.0.0.1:61050 2024-11-19T04:58:20,687 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:58:20,688 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:58:20,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:437650x0, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T04:58:20,693 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:437650x0, quorum=127.0.0.1:61050, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T04:58:20,693 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43765-0x1012e9716d90001 connected 2024-11-19T04:58:20,693 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T04:58:20,693 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T04:58:20,694 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43765-0x1012e9716d90001, quorum=127.0.0.1:61050, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T04:58:20,695 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43765-0x1012e9716d90001, quorum=127.0.0.1:61050, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T04:58:20,696 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43765 2024-11-19T04:58:20,696 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43765 2024-11-19T04:58:20,697 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43765 2024-11-19T04:58:20,697 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43765 2024-11-19T04:58:20,697 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43765 2024-11-19T04:58:20,709 
DEBUG [M:0;08a7f35e60d4:42741 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;08a7f35e60d4:42741 2024-11-19T04:58:20,710 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/08a7f35e60d4,42741,1731992300634 2024-11-19T04:58:20,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42741-0x1012e9716d90000, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T04:58:20,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43765-0x1012e9716d90001, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T04:58:20,712 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42741-0x1012e9716d90000, quorum=127.0.0.1:61050, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/08a7f35e60d4,42741,1731992300634 2024-11-19T04:58:20,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42741-0x1012e9716d90000, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:58:20,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43765-0x1012e9716d90001, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T04:58:20,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43765-0x1012e9716d90001, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:58:20,715 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42741-0x1012e9716d90000, quorum=127.0.0.1:61050, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T04:58:20,715 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/08a7f35e60d4,42741,1731992300634 from backup master directory 2024-11-19T04:58:20,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42741-0x1012e9716d90000, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/08a7f35e60d4,42741,1731992300634 2024-11-19T04:58:20,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43765-0x1012e9716d90001, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T04:58:20,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42741-0x1012e9716d90000, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T04:58:20,716 WARN [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
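The ZKUtil/ZKWatcher records above show the master and region server setting watches on znodes such as /hbase/master and /hbase/running before those znodes exist, then reacting to NodeCreated, NodeDeleted and NodeChildrenChanged events as the active-master handover proceeds. A small sketch of that watch-before-create pattern with the plain Apache ZooKeeper client (not HBase's ZKUtil/ZKWatcher wrappers; the connect string and timeout are taken from the log but are otherwise illustrative):

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Sketch only: exists() registers a watch even when the znode is absent, so this
// client is notified once another process (here, the active master) creates it.
public class WatchMasterZNode {
  public static void main(String[] args) throws Exception {
    Watcher watcher = (WatchedEvent event) ->
        System.out.println("Received ZooKeeper Event, type=" + event.getType()
            + ", state=" + event.getState() + ", path=" + event.getPath());
    ZooKeeper zk = new ZooKeeper("127.0.0.1:61050", 30000, watcher);
    // Equivalent of "Set watcher on znode that does not yet exist, /hbase/master"
    if (zk.exists("/hbase/master", true) == null) {
      System.out.println("/hbase/master not created yet; watch registered");
    }
    Thread.sleep(60_000);   // keep the session alive long enough to observe events
    zk.close();
  }
}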
2024-11-19T04:58:20,716 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=08a7f35e60d4,42741,1731992300634 2024-11-19T04:58:20,720 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/hbase.id] with ID: 4efe2f44-02f1-474b-9667-ee306f6cc5a4 2024-11-19T04:58:20,720 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/.tmp/hbase.id 2024-11-19T04:58:20,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741826_1002 (size=42) 2024-11-19T04:58:20,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40691 is added to blk_1073741826_1002 (size=42) 2024-11-19T04:58:20,730 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/.tmp/hbase.id]:[hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/hbase.id] 2024-11-19T04:58:20,741 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:58:20,742 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-19T04:58:20,743 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
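The FSUtils records above write the new cluster ID to a temporary .tmp/hbase.id file and then move it to its final name, so readers never see a partially written file. A minimal sketch of that write-then-rename pattern with the Hadoop FileSystem API follows; the paths are shortened placeholders, and the ID string is simply the one reported in the log.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch only: write a small marker file to a temporary location, then rename it
// into place, mirroring the hbase.id records above.
public class WriteThenRename {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id");   // placeholder path
    Path dst = new Path("/user/jenkins/test-data/hbase.id");        // placeholder path
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("4efe2f44-02f1-474b-9667-ee306f6cc5a4".getBytes(StandardCharsets.UTF_8));
    }
    if (!fs.rename(tmp, dst)) {
      throw new IOException("rename failed: " + tmp + " -> " + dst);
    }
  }
}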
2024-11-19T04:58:20,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43765-0x1012e9716d90001, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:58:20,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42741-0x1012e9716d90000, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:58:20,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741827_1003 (size=196) 2024-11-19T04:58:20,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40691 is added to blk_1073741827_1003 (size=196) 2024-11-19T04:58:20,753 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T04:58:20,753 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T04:58:20,754 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T04:58:20,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40691 is added to blk_1073741828_1004 (size=1189) 2024-11-19T04:58:20,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741828_1004 (size=1189) 2024-11-19T04:58:20,761 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/MasterData/data/master/store 2024-11-19T04:58:20,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741829_1005 (size=34) 2024-11-19T04:58:20,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40691 is added to blk_1073741829_1005 (size=34) 2024-11-19T04:58:20,768 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:58:20,768 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T04:58:20,768 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:58:20,768 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:58:20,768 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T04:58:20,768 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:58:20,768 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
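The 'info' family created for master:store above carries VERSIONS => 3, IN_MEMORY => true, a ROWCOL bloom filter, ROW_INDEX_V1 block encoding and an 8 KB block size. A hedged sketch of declaring an equivalent family with the public HBase client API follows; the table name is hypothetical, and this is not how the master itself builds the descriptor.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      static TableDescriptor build() {
        // Mirrors the attributes logged for the 'info' family of master:store.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8 * 1024)
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example", "store")) // hypothetical table name
            .setColumnFamily(info)
            .build();
      }
    }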
2024-11-19T04:58:20,768 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731992300768Disabling compacts and flushes for region at 1731992300768Disabling writes for close at 1731992300768Writing region close event to WAL at 1731992300768Closed at 1731992300768 2024-11-19T04:58:20,769 WARN [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/MasterData/data/master/store/.initializing 2024-11-19T04:58:20,769 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/MasterData/WALs/08a7f35e60d4,42741,1731992300634 2024-11-19T04:58:20,772 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=08a7f35e60d4%2C42741%2C1731992300634, suffix=, logDir=hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/MasterData/WALs/08a7f35e60d4,42741,1731992300634, archiveDir=hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/MasterData/oldWALs, maxLogs=10 2024-11-19T04:58:20,772 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C42741%2C1731992300634.1731992300772 2024-11-19T04:58:20,777 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/MasterData/WALs/08a7f35e60d4,42741,1731992300634/08a7f35e60d4%2C42741%2C1731992300634.1731992300772 2024-11-19T04:58:20,780 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38425:38425),(127.0.0.1/127.0.0.1:45599:45599)] 2024-11-19T04:58:20,781 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T04:58:20,781 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:58:20,781 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:58:20,781 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:58:20,782 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:58:20,784 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T04:58:20,784 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:58:20,784 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:58:20,784 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:58:20,785 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T04:58:20,785 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:58:20,786 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T04:58:20,786 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:58:20,787 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T04:58:20,787 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:58:20,787 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T04:58:20,787 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:58:20,788 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T04:58:20,788 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:58:20,789 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T04:58:20,789 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:58:20,790 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:58:20,790 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:58:20,791 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:58:20,791 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:58:20,792 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T04:58:20,793 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T04:58:20,795 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T04:58:20,795 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=792920, jitterRate=0.00825105607509613}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T04:58:20,796 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731992300781Initializing all the Stores at 1731992300782 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992300782Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992300782Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992300782Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992300782Cleaning up temporary data from old regions at 1731992300791 (+9 ms)Region opened successfully at 1731992300796 (+5 ms) 2024-11-19T04:58:20,796 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T04:58:20,799 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59aabf0a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=08a7f35e60d4/172.17.0.2:0 2024-11-19T04:58:20,801 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T04:58:20,801 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T04:58:20,801 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T04:58:20,801 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T04:58:20,801 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-19T04:58:20,802 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-19T04:58:20,802 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T04:58:20,804 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T04:58:20,805 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42741-0x1012e9716d90000, quorum=127.0.0.1:61050, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T04:58:20,806 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T04:58:20,807 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T04:58:20,807 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42741-0x1012e9716d90000, quorum=127.0.0.1:61050, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T04:58:20,810 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T04:58:20,810 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T04:58:20,811 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42741-0x1012e9716d90000, quorum=127.0.0.1:61050, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T04:58:20,812 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T04:58:20,813 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42741-0x1012e9716d90000, quorum=127.0.0.1:61050, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T04:58:20,814 DEBUG 
[master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T04:58:20,816 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42741-0x1012e9716d90000, quorum=127.0.0.1:61050, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T04:58:20,817 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T04:58:20,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42741-0x1012e9716d90000, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T04:58:20,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43765-0x1012e9716d90001, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T04:58:20,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43765-0x1012e9716d90001, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:58:20,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42741-0x1012e9716d90000, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:58:20,821 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=08a7f35e60d4,42741,1731992300634, sessionid=0x1012e9716d90000, setting cluster-up flag (Was=false) 2024-11-19T04:58:20,824 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42741-0x1012e9716d90000, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:58:20,824 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43765-0x1012e9716d90001, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:58:20,829 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T04:58:20,830 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=08a7f35e60d4,42741,1731992300634 2024-11-19T04:58:20,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42741-0x1012e9716d90000, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:58:20,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43765-0x1012e9716d90001, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:58:20,839 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T04:58:20,840 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=08a7f35e60d4,42741,1731992300634 2024-11-19T04:58:20,842 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T04:58:20,846 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T04:58:20,846 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T04:58:20,846 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-19T04:58:20,846 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 08a7f35e60d4,42741,1731992300634 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T04:58:20,847 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/08a7f35e60d4:0, corePoolSize=5, maxPoolSize=5 2024-11-19T04:58:20,847 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/08a7f35e60d4:0, corePoolSize=5, maxPoolSize=5 2024-11-19T04:58:20,847 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=5, maxPoolSize=5 2024-11-19T04:58:20,848 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=5, maxPoolSize=5 2024-11-19T04:58:20,848 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/08a7f35e60d4:0, corePoolSize=10, maxPoolSize=10 2024-11-19T04:58:20,848 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:58:20,848 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=2, maxPoolSize=2 2024-11-19T04:58:20,848 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/08a7f35e60d4:0, corePoolSize=1, 
maxPoolSize=1 2024-11-19T04:58:20,848 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731992330848 2024-11-19T04:58:20,848 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T04:58:20,849 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T04:58:20,849 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T04:58:20,849 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T04:58:20,849 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T04:58:20,849 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T04:58:20,849 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T04:58:20,849 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T04:58:20,849 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T04:58:20,849 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T04:58:20,849 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T04:58:20,850 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T04:58:20,850 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T04:58:20,850 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T04:58:20,850 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:58:20,851 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T04:58:20,851 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.large.0-1731992300850,5,FailOnTimeoutGroup] 2024-11-19T04:58:20,852 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.small.0-1731992300851,5,FailOnTimeoutGroup] 2024-11-19T04:58:20,852 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T04:58:20,852 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T04:58:20,852 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T04:58:20,852 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
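Earlier in this section the master's WAL is created with blocksize=256 MB, rollsize=128 MB, maxLogs=10 and the FSHLogProvider. A hedged sketch of the standard configuration keys behind those numbers; the values only restate what the log reports and the helper class is illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
      static Configuration walConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.wal.provider", "filesystem");                          // FSHLogProvider
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // 256 MB block size
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // 256 MB * 0.5 = 128 MB rollsize
        conf.setInt("hbase.regionserver.maxlogs", 10);
        return conf;
      }
    }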
2024-11-19T04:58:20,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741831_1007 (size=1321) 2024-11-19T04:58:20,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40691 is added to blk_1073741831_1007 (size=1321) 2024-11-19T04:58:20,858 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T04:58:20,858 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754 2024-11-19T04:58:20,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40691 is added to blk_1073741832_1008 (size=32) 2024-11-19T04:58:20,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741832_1008 (size=32) 2024-11-19T04:58:20,865 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:58:20,866 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T04:58:20,867 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T04:58:20,867 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:58:20,868 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:58:20,868 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T04:58:20,869 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T04:58:20,869 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:58:20,869 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:58:20,869 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T04:58:20,870 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T04:58:20,870 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:58:20,871 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:58:20,871 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T04:58:20,872 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T04:58:20,872 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:58:20,873 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:58:20,873 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T04:58:20,873 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/data/hbase/meta/1588230740 2024-11-19T04:58:20,873 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/data/hbase/meta/1588230740 2024-11-19T04:58:20,875 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T04:58:20,875 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T04:58:20,875 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
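The CompactionConfiguration lines above repeat the same defaults for every store (min 3 / max 10 files, ratio 1.2, off-peak ratio 5.0, 128 MB min compact size), and FlushLargeStoresPolicy falls back to memstore-flush-size divided by the number of families because no lower bound is configured. A hedged sketch of the configuration keys those values come from; the class is illustrative and only restates the logged numbers.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      static Configuration compactionConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
        // Left unset in this run, hence the "using region.getMemStoreFlushHeapSize/# of families"
        // fallback logged by FlushLargeStoresPolicy:
        // conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 16L * 1024 * 1024);
        return conf;
      }
    }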
2024-11-19T04:58:20,876 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T04:58:20,878 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T04:58:20,878 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=771400, jitterRate=-0.019114598631858826}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T04:58:20,879 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731992300865Initializing all the Stores at 1731992300866 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992300866Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992300866Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992300866Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992300866Cleaning up temporary data from old regions at 1731992300875 (+9 ms)Region opened successfully at 1731992300879 (+4 ms) 2024-11-19T04:58:20,879 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T04:58:20,879 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T04:58:20,879 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T04:58:20,879 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T04:58:20,879 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T04:58:20,879 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T04:58:20,879 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731992300879Disabling compacts and flushes for region at 1731992300879Disabling writes for close at 1731992300879Writing region 
close event to WAL at 1731992300879Closed at 1731992300879 2024-11-19T04:58:20,881 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T04:58:20,881 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T04:58:20,881 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T04:58:20,882 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T04:58:20,883 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T04:58:20,899 INFO [RS:0;08a7f35e60d4:43765 {}] regionserver.HRegionServer(746): ClusterId : 4efe2f44-02f1-474b-9667-ee306f6cc5a4 2024-11-19T04:58:20,899 DEBUG [RS:0;08a7f35e60d4:43765 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T04:58:20,901 DEBUG [RS:0;08a7f35e60d4:43765 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T04:58:20,902 DEBUG [RS:0;08a7f35e60d4:43765 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T04:58:20,905 DEBUG [RS:0;08a7f35e60d4:43765 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T04:58:20,905 DEBUG [RS:0;08a7f35e60d4:43765 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@721dc122, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=08a7f35e60d4/172.17.0.2:0 2024-11-19T04:58:20,918 DEBUG [RS:0;08a7f35e60d4:43765 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;08a7f35e60d4:43765 2024-11-19T04:58:20,918 INFO [RS:0;08a7f35e60d4:43765 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T04:58:20,918 INFO [RS:0;08a7f35e60d4:43765 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T04:58:20,918 DEBUG [RS:0;08a7f35e60d4:43765 {}] regionserver.HRegionServer(832): About to register with Master. 
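Throughout this section the ZKWatcher lines record NodeCreated and NodeChildrenChanged events delivered to the master and region server sessions on quorum 127.0.0.1:61050. A bare ZooKeeper client that registers a child watch and prints such events is sketched below; it is illustrative only and uses the raw ZooKeeper API rather than HBase's ZKWatcher.

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ZkWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        Watcher watcher = (WatchedEvent event) -> {
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
          // NodeCreated / NodeChildrenChanged events arrive here, one callback per event.
          System.out.println("event=" + event.getType() + " path=" + event.getPath());
        };
        ZooKeeper zk = new ZooKeeper("127.0.0.1:61050", 30_000, watcher);
        connected.await();
        zk.getChildren("/hbase", true); // one-shot child watch on the base znode
        Thread.sleep(10_000);           // keep the session open while events are delivered
        zk.close();
      }
    }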
2024-11-19T04:58:20,919 INFO [RS:0;08a7f35e60d4:43765 {}] regionserver.HRegionServer(2659): reportForDuty to master=08a7f35e60d4,42741,1731992300634 with port=43765, startcode=1731992300684 2024-11-19T04:58:20,919 DEBUG [RS:0;08a7f35e60d4:43765 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T04:58:20,925 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50813, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T04:58:20,925 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42741 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 08a7f35e60d4,43765,1731992300684 2024-11-19T04:58:20,926 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42741 {}] master.ServerManager(517): Registering regionserver=08a7f35e60d4,43765,1731992300684 2024-11-19T04:58:20,927 DEBUG [RS:0;08a7f35e60d4:43765 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754 2024-11-19T04:58:20,927 DEBUG [RS:0;08a7f35e60d4:43765 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42211 2024-11-19T04:58:20,927 DEBUG [RS:0;08a7f35e60d4:43765 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T04:58:20,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42741-0x1012e9716d90000, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T04:58:20,929 DEBUG [RS:0;08a7f35e60d4:43765 {}] zookeeper.ZKUtil(111): regionserver:43765-0x1012e9716d90001, quorum=127.0.0.1:61050, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/08a7f35e60d4,43765,1731992300684 2024-11-19T04:58:20,929 WARN [RS:0;08a7f35e60d4:43765 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T04:58:20,929 INFO [RS:0;08a7f35e60d4:43765 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T04:58:20,930 DEBUG [RS:0;08a7f35e60d4:43765 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/WALs/08a7f35e60d4,43765,1731992300684 2024-11-19T04:58:20,930 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [08a7f35e60d4,43765,1731992300684] 2024-11-19T04:58:20,935 INFO [RS:0;08a7f35e60d4:43765 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T04:58:20,936 INFO [RS:0;08a7f35e60d4:43765 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T04:58:20,937 INFO [RS:0;08a7f35e60d4:43765 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T04:58:20,937 INFO [RS:0;08a7f35e60d4:43765 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
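The registration exchange above (reportForDuty, Checking decommissioned status, Registering regionserver) leaves the cluster with one active master and one live region server. A hedged client-side sketch for confirming that via the Admin API; the quorum address matches this test, everything else is generic.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterCheckSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.setInt("hbase.zookeeper.property.clientPort", 61050);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          ClusterMetrics metrics = admin.getClusterMetrics();
          System.out.println("active master = " + metrics.getMasterName());
          metrics.getLiveServerMetrics()
                 .forEach((server, load) -> System.out.println("region server = " + server));
        }
      }
    }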
2024-11-19T04:58:20,937 INFO [RS:0;08a7f35e60d4:43765 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T04:58:20,938 INFO [RS:0;08a7f35e60d4:43765 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T04:58:20,938 INFO [RS:0;08a7f35e60d4:43765 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T04:58:20,938 DEBUG [RS:0;08a7f35e60d4:43765 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:58:20,938 DEBUG [RS:0;08a7f35e60d4:43765 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:58:20,938 DEBUG [RS:0;08a7f35e60d4:43765 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:58:20,938 DEBUG [RS:0;08a7f35e60d4:43765 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:58:20,938 DEBUG [RS:0;08a7f35e60d4:43765 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:58:20,938 DEBUG [RS:0;08a7f35e60d4:43765 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/08a7f35e60d4:0, corePoolSize=2, maxPoolSize=2 2024-11-19T04:58:20,938 DEBUG [RS:0;08a7f35e60d4:43765 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:58:20,938 DEBUG [RS:0;08a7f35e60d4:43765 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:58:20,938 DEBUG [RS:0;08a7f35e60d4:43765 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:58:20,938 DEBUG [RS:0;08a7f35e60d4:43765 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:58:20,938 DEBUG [RS:0;08a7f35e60d4:43765 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:58:20,938 DEBUG [RS:0;08a7f35e60d4:43765 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/08a7f35e60d4:0, corePoolSize=1, maxPoolSize=1 2024-11-19T04:58:20,938 DEBUG [RS:0;08a7f35e60d4:43765 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/08a7f35e60d4:0, corePoolSize=3, maxPoolSize=3 2024-11-19T04:58:20,938 DEBUG [RS:0;08a7f35e60d4:43765 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/08a7f35e60d4:0, corePoolSize=3, maxPoolSize=3 2024-11-19T04:58:20,940 INFO [RS:0;08a7f35e60d4:43765 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
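The executor services started above are small named thread pools with fixed corePoolSize/maxPoolSize values. A generic java.util.concurrent sketch of that pattern follows; it is an analogy only and does not use HBase's internal executor.ExecutorService class.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadFactory;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    public class ExecutorSketch {
      static ThreadPoolExecutor newPool(String name, int poolSize) {
        AtomicInteger counter = new AtomicInteger();
        ThreadFactory factory = r -> new Thread(r, name + "-" + counter.incrementAndGet());
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            poolSize, poolSize, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(), factory);
        pool.allowCoreThreadTimeOut(true); // let idle threads exit instead of pinning poolSize threads
        return pool;
      }

      public static void main(String[] args) throws InterruptedException {
        ThreadPoolExecutor openRegion = newPool("RS_OPEN_REGION", 1); // name taken from the log
        openRegion.execute(() -> System.out.println("open-region task running"));
        openRegion.shutdown();
        openRegion.awaitTermination(5, TimeUnit.SECONDS);
      }
    }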
2024-11-19T04:58:20,940 INFO [RS:0;08a7f35e60d4:43765 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T04:58:20,940 INFO [RS:0;08a7f35e60d4:43765 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T04:58:20,940 INFO [RS:0;08a7f35e60d4:43765 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T04:58:20,940 INFO [RS:0;08a7f35e60d4:43765 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T04:58:20,940 INFO [RS:0;08a7f35e60d4:43765 {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,43765,1731992300684-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T04:58:20,960 INFO [RS:0;08a7f35e60d4:43765 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T04:58:20,961 INFO [RS:0;08a7f35e60d4:43765 {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,43765,1731992300684-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T04:58:20,961 INFO [RS:0;08a7f35e60d4:43765 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:58:20,961 INFO [RS:0;08a7f35e60d4:43765 {}] regionserver.Replication(171): 08a7f35e60d4,43765,1731992300684 started 2024-11-19T04:58:20,975 INFO [RS:0;08a7f35e60d4:43765 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:58:20,975 INFO [RS:0;08a7f35e60d4:43765 {}] regionserver.HRegionServer(1482): Serving as 08a7f35e60d4,43765,1731992300684, RpcServer on 08a7f35e60d4/172.17.0.2:43765, sessionid=0x1012e9716d90001 2024-11-19T04:58:20,975 DEBUG [RS:0;08a7f35e60d4:43765 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T04:58:20,975 DEBUG [RS:0;08a7f35e60d4:43765 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 08a7f35e60d4,43765,1731992300684 2024-11-19T04:58:20,975 DEBUG [RS:0;08a7f35e60d4:43765 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '08a7f35e60d4,43765,1731992300684' 2024-11-19T04:58:20,975 DEBUG [RS:0;08a7f35e60d4:43765 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T04:58:20,976 DEBUG [RS:0;08a7f35e60d4:43765 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T04:58:20,976 DEBUG [RS:0;08a7f35e60d4:43765 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T04:58:20,976 DEBUG [RS:0;08a7f35e60d4:43765 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T04:58:20,976 DEBUG [RS:0;08a7f35e60d4:43765 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 08a7f35e60d4,43765,1731992300684 2024-11-19T04:58:20,976 DEBUG [RS:0;08a7f35e60d4:43765 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '08a7f35e60d4,43765,1731992300684' 2024-11-19T04:58:20,976 DEBUG [RS:0;08a7f35e60d4:43765 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T04:58:20,977 DEBUG 
[RS:0;08a7f35e60d4:43765 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T04:58:20,977 DEBUG [RS:0;08a7f35e60d4:43765 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T04:58:20,977 INFO [RS:0;08a7f35e60d4:43765 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T04:58:20,977 INFO [RS:0;08a7f35e60d4:43765 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T04:58:21,033 WARN [08a7f35e60d4:42741 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-19T04:58:21,079 INFO [RS:0;08a7f35e60d4:43765 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=08a7f35e60d4%2C43765%2C1731992300684, suffix=, logDir=hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/WALs/08a7f35e60d4,43765,1731992300684, archiveDir=hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/oldWALs, maxLogs=32 2024-11-19T04:58:21,079 INFO [RS:0;08a7f35e60d4:43765 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C43765%2C1731992300684.1731992301079 2024-11-19T04:58:21,085 INFO [RS:0;08a7f35e60d4:43765 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/WALs/08a7f35e60d4,43765,1731992300684/08a7f35e60d4%2C43765%2C1731992300684.1731992301079 2024-11-19T04:58:21,089 DEBUG [RS:0;08a7f35e60d4:43765 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45599:45599),(127.0.0.1/127.0.0.1:38425:38425)] 2024-11-19T04:58:21,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:58:21,283 DEBUG [08a7f35e60d4:42741 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T04:58:21,284 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=08a7f35e60d4,43765,1731992300684 2024-11-19T04:58:21,286 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 08a7f35e60d4,43765,1731992300684, state=OPENING 2024-11-19T04:58:21,287 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T04:58:21,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42741-0x1012e9716d90000, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:58:21,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43765-0x1012e9716d90001, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:58:21,289 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T04:58:21,289 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T04:58:21,289 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T04:58:21,289 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=08a7f35e60d4,43765,1731992300684}] 2024-11-19T04:58:21,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,38579,1731992098058/08a7f35e60d4%2C38579%2C1731992098058.meta.1731992098895.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:58:21,442 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T04:58:21,444 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37517, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T04:58:21,448 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T04:58:21,448 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T04:58:21,450 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=08a7f35e60d4%2C43765%2C1731992300684.meta, suffix=.meta, logDir=hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/WALs/08a7f35e60d4,43765,1731992300684, archiveDir=hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/oldWALs, maxLogs=32 2024-11-19T04:58:21,450 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 08a7f35e60d4%2C43765%2C1731992300684.meta.1731992301450.meta 2024-11-19T04:58:21,466 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/WALs/08a7f35e60d4,43765,1731992300684/08a7f35e60d4%2C43765%2C1731992300684.meta.1731992301450.meta 2024-11-19T04:58:21,472 DEBUG 
[RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45599:45599),(127.0.0.1/127.0.0.1:38425:38425)] 2024-11-19T04:58:21,478 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T04:58:21,478 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T04:58:21,478 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T04:58:21,478 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-19T04:58:21,479 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T04:58:21,479 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T04:58:21,479 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T04:58:21,479 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T04:58:21,484 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T04:58:21,485 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T04:58:21,485 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:58:21,486 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:58:21,486 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T04:58:21,487 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T04:58:21,487 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:58:21,487 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:58:21,487 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T04:58:21,488 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T04:58:21,488 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:58:21,488 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:58:21,488 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T04:58:21,489 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T04:58:21,489 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T04:58:21,489 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T04:58:21,490 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T04:58:21,490 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/data/hbase/meta/1588230740 2024-11-19T04:58:21,491 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/data/hbase/meta/1588230740 2024-11-19T04:58:21,493 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T04:58:21,493 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T04:58:21,493 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
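[editor's note] The CompactionConfiguration lines above print, per column family of hbase:meta, the compaction thresholds in effect: minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms, jitter 0.5. A minimal sketch of reading these knobs from an HBase configuration is shown below; the key names are the commonly documented ones and should be verified against the HBase version in use, and the defaults passed here simply mirror the values printed in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Fallback values below mirror what the log above reports for region 1588230740.
            System.out.println("min files to compact = " + conf.getInt("hbase.hstore.compaction.min", 3));
            System.out.println("max files to compact = " + conf.getInt("hbase.hstore.compaction.max", 10));
            System.out.println("compaction ratio     = " + conf.getFloat("hbase.hstore.compaction.ratio", 1.2F));
            System.out.println("off-peak ratio       = " + conf.getFloat("hbase.hstore.compaction.ratio.offpeak", 5.0F));
            System.out.println("major period (ms)    = " + conf.getLong("hbase.hregion.majorcompaction", 604800000L));
        }
    }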
2024-11-19T04:58:21,494 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T04:58:21,495 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=800463, jitterRate=0.017842113971710205}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T04:58:21,495 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T04:58:21,496 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731992301479Writing region info on filesystem at 1731992301479Initializing all the Stores at 1731992301480 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992301480Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992301484 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731992301484Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731992301484Cleaning up temporary data from old regions at 1731992301493 (+9 ms)Running coprocessor post-open hooks at 1731992301495 (+2 ms)Region opened successfully at 1731992301496 (+1 ms) 2024-11-19T04:58:21,497 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731992301442 2024-11-19T04:58:21,499 DEBUG [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T04:58:21,499 INFO [RS_OPEN_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T04:58:21,500 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=08a7f35e60d4,43765,1731992300684 2024-11-19T04:58:21,501 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 08a7f35e60d4,43765,1731992300684, state=OPEN 2024-11-19T04:58:21,508 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42741-0x1012e9716d90000, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T04:58:21,508 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43765-0x1012e9716d90001, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T04:58:21,508 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=08a7f35e60d4,43765,1731992300684 2024-11-19T04:58:21,508 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T04:58:21,508 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T04:58:21,511 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T04:58:21,511 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=08a7f35e60d4,43765,1731992300684 in 219 msec 2024-11-19T04:58:21,513 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T04:58:21,513 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 630 msec 2024-11-19T04:58:21,514 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T04:58:21,514 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T04:58:21,515 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T04:58:21,515 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=08a7f35e60d4,43765,1731992300684, seqNum=-1] 2024-11-19T04:58:21,516 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T04:58:21,517 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47691, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T04:58:21,522 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 676 msec 2024-11-19T04:58:21,522 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731992301522, completionTime=-1 2024-11-19T04:58:21,522 INFO 
[master/08a7f35e60d4:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T04:58:21,522 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-19T04:58:21,524 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-19T04:58:21,524 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731992361524 2024-11-19T04:58:21,524 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731992421524 2024-11-19T04:58:21,524 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-19T04:58:21,525 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,42741,1731992300634-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T04:58:21,525 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,42741,1731992300634-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:58:21,525 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,42741,1731992300634-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:58:21,525 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-08a7f35e60d4:42741, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:58:21,525 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T04:58:21,525 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T04:58:21,527 DEBUG [master/08a7f35e60d4:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T04:58:21,528 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.812sec 2024-11-19T04:58:21,528 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T04:58:21,528 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T04:58:21,528 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T04:58:21,528 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
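[editor's note] At this point the master has finished initialization, and InitMetaProcedure above reported creating the 'default' and 'hbase' namespaces. Purely as an illustration, assuming the test minicluster were still running and the standard HBase client were on the classpath, those namespaces could be listed with the Admin API as sketched below; the ZooKeeper address is copied from the log and would differ in any other run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListNamespacesSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Quorum and client port taken from the log above; adjust for a real cluster.
            conf.set("hbase.zookeeper.quorum", "127.0.0.1");
            conf.set("hbase.zookeeper.property.clientPort", "61050");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
                    System.out.println("namespace: " + ns.getName()); // expect 'default' and 'hbase'
                }
            }
        }
    }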
2024-11-19T04:58:21,529 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T04:58:21,529 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,42741,1731992300634-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T04:58:21,529 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,42741,1731992300634-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T04:58:21,531 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T04:58:21,531 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T04:58:21,531 INFO [master/08a7f35e60d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=08a7f35e60d4,42741,1731992300634-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T04:58:21,599 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1dbd8c23, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T04:58:21,600 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 08a7f35e60d4,42741,-1 for getting cluster id 2024-11-19T04:58:21,600 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T04:58:21,605 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '4efe2f44-02f1-474b-9667-ee306f6cc5a4' 2024-11-19T04:58:21,605 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T04:58:21,606 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "4efe2f44-02f1-474b-9667-ee306f6cc5a4" 2024-11-19T04:58:21,606 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79e091f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T04:58:21,606 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [08a7f35e60d4,42741,-1] 2024-11-19T04:58:21,606 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T04:58:21,606 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:58:21,608 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35426, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T04:58:21,609 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36507c53, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T04:58:21,609 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T04:58:21,610 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=08a7f35e60d4,43765,1731992300684, seqNum=-1] 2024-11-19T04:58:21,611 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T04:58:21,612 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36664, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T04:58:21,613 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=08a7f35e60d4,42741,1731992300634 2024-11-19T04:58:21,614 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T04:58:21,616 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T04:58:21,616 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T04:58:21,618 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/WALs/test.com,8080,1, archiveDir=hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/oldWALs, maxLogs=32 2024-11-19T04:58:21,619 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731992301619 2024-11-19T04:58:21,624 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/WALs/test.com,8080,1/test.com%2C8080%2C1.1731992301619 2024-11-19T04:58:21,632 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38425:38425),(127.0.0.1/127.0.0.1:45599:45599)] 2024-11-19T04:58:21,637 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731992301637 2024-11-19T04:58:21,653 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:21,654 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:21,654 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:21,654 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:21,654 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:21,654 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/WALs/test.com,8080,1/test.com%2C8080%2C1.1731992301619 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/WALs/test.com,8080,1/test.com%2C8080%2C1.1731992301637 2024-11-19T04:58:21,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741835_1011 (size=93) 2024-11-19T04:58:21,658 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40691 is added to blk_1073741835_1011 (size=93) 2024-11-19T04:58:21,668 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38425:38425),(127.0.0.1/127.0.0.1:45599:45599)] 2024-11-19T04:58:21,669 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/WALs/test.com,8080,1/test.com%2C8080%2C1.1731992301619 to hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/oldWALs/test.com%2C8080%2C1.1731992301619 2024-11-19T04:58:21,669 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:21,669 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:21,669 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:21,669 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:21,669 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:21,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741836_1012 (size=93) 2024-11-19T04:58:21,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40691 is added to blk_1073741836_1012 (size=93) 2024-11-19T04:58:21,674 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/oldWALs 2024-11-19T04:58:21,674 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731992301637) 2024-11-19T04:58:21,674 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T04:58:21,674 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
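[editor's note] The records above show the log-rolling behavior the test exercises: the old writer test.com%2C8080%2C1.1731992301619 is rolled with 0 entries, archived into the oldWALs directory, and the WAL is then closed. As a hedged illustration of inspecting that archive directory with the plain Hadoop FileSystem API (the NameNode address and the run-specific data path are copied from this log and would differ in any other run):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListOldWalsSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // hdfs://localhost:42211 is the test NameNode from the log above.
            FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:42211"), conf);
            Path oldWals = new Path(
                "/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/oldWALs");
            for (FileStatus st : fs.listStatus(oldWals)) {
                System.out.println(st.getPath().getName() + " (" + st.getLen() + " bytes)");
            }
        }
    }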
2024-11-19T04:58:21,674 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T04:58:21,674 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:58:21,674 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:58:21,674 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-19T04:58:21,674 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T04:58:21,674 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1458769622, stopped=false 2024-11-19T04:58:21,674 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=08a7f35e60d4,42741,1731992300634 2024-11-19T04:58:21,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42741-0x1012e9716d90000, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T04:58:21,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43765-0x1012e9716d90001, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T04:58:21,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43765-0x1012e9716d90001, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:58:21,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42741-0x1012e9716d90000, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:58:21,676 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T04:58:21,676 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-19T04:58:21,676 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T04:58:21,676 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:58:21,677 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '08a7f35e60d4,43765,1731992300684' ***** 2024-11-19T04:58:21,677 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T04:58:21,677 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42741-0x1012e9716d90000, quorum=127.0.0.1:61050, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T04:58:21,677 INFO [RS:0;08a7f35e60d4:43765 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T04:58:21,677 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T04:58:21,677 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43765-0x1012e9716d90001, quorum=127.0.0.1:61050, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T04:58:21,677 INFO [RS:0;08a7f35e60d4:43765 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T04:58:21,677 INFO [RS:0;08a7f35e60d4:43765 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T04:58:21,677 INFO [RS:0;08a7f35e60d4:43765 {}] regionserver.HRegionServer(959): stopping server 08a7f35e60d4,43765,1731992300684 2024-11-19T04:58:21,677 INFO [RS:0;08a7f35e60d4:43765 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T04:58:21,677 INFO [RS:0;08a7f35e60d4:43765 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;08a7f35e60d4:43765. 
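[editor's note] The ZKWatcher records above show how shutdown is propagated: both the master and the region server receive a NodeDeleted event for /hbase/running and react by stopping. The sketch below is a generic ZooKeeper-client illustration of watching that znode for deletion; it is not HBase's ZKWatcher, the quorum address is copied from the log, and the session timeout is an arbitrary illustrative value.

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningZnodeWatchSketch {
        public static void main(String[] args) throws Exception {
            CountDownLatch deleted = new CountDownLatch(1);
            ZooKeeper zk = new ZooKeeper("127.0.0.1:61050", 30000, event -> {
                // The default watcher also sees connection-state events; only react to deletion.
                if (event.getType() == Watcher.Event.EventType.NodeDeleted
                        && "/hbase/running".equals(event.getPath())) {
                    deleted.countDown();
                }
            });
            // exists(path, true) registers a one-shot watch with the default watcher above.
            zk.exists("/hbase/running", true);
            deleted.await();  // returns once the cluster-up marker znode is removed
            System.out.println("/hbase/running deleted -> initiate shutdown");
            zk.close();
        }
    }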
2024-11-19T04:58:21,677 DEBUG [RS:0;08a7f35e60d4:43765 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T04:58:21,677 DEBUG [RS:0;08a7f35e60d4:43765 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:58:21,677 INFO [RS:0;08a7f35e60d4:43765 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T04:58:21,677 INFO [RS:0;08a7f35e60d4:43765 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T04:58:21,677 INFO [RS:0;08a7f35e60d4:43765 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-19T04:58:21,678 INFO [RS:0;08a7f35e60d4:43765 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T04:58:21,678 INFO [RS:0;08a7f35e60d4:43765 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-19T04:58:21,678 DEBUG [RS:0;08a7f35e60d4:43765 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-19T04:58:21,678 DEBUG [RS:0;08a7f35e60d4:43765 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-19T04:58:21,678 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T04:58:21,678 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T04:58:21,678 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T04:58:21,678 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T04:58:21,678 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T04:58:21,678 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-19T04:58:21,695 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/data/hbase/meta/1588230740/.tmp/ns/5a2df9e92dc54abf91818b44a00612eb is 43, key is default/ns:d/1731992301518/Put/seqid=0 2024-11-19T04:58:21,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40691 is added to blk_1073741837_1013 (size=5153) 2024-11-19T04:58:21,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741837_1013 (size=5153) 2024-11-19T04:58:21,703 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/data/hbase/meta/1588230740/.tmp/ns/5a2df9e92dc54abf91818b44a00612eb 2024-11-19T04:58:21,710 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/data/hbase/meta/1588230740/.tmp/ns/5a2df9e92dc54abf91818b44a00612eb as hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/data/hbase/meta/1588230740/ns/5a2df9e92dc54abf91818b44a00612eb 2024-11-19T04:58:21,715 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/data/hbase/meta/1588230740/ns/5a2df9e92dc54abf91818b44a00612eb, entries=2, sequenceid=6, filesize=5.0 K 2024-11-19T04:58:21,717 INFO 
[RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 39ms, sequenceid=6, compaction requested=false 2024-11-19T04:58:21,722 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-19T04:58:21,723 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T04:58:21,723 INFO [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T04:58:21,723 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731992301678Running coprocessor pre-close hooks at 1731992301678Disabling compacts and flushes for region at 1731992301678Disabling writes for close at 1731992301678Obtaining lock to block concurrent updates at 1731992301678Preparing flush snapshotting stores in 1588230740 at 1731992301678Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731992301678Flushing stores of hbase:meta,,1.1588230740 at 1731992301679 (+1 ms)Flushing 1588230740/ns: creating writer at 1731992301679Flushing 1588230740/ns: appending metadata at 1731992301694 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731992301694Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7a2b4a6c: reopening flushed file at 1731992301709 (+15 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 39ms, sequenceid=6, compaction requested=false at 1731992301717 (+8 ms)Writing region close event to WAL at 1731992301718 (+1 ms)Running coprocessor post-close hooks at 1731992301723 (+5 ms)Closed at 1731992301723 2024-11-19T04:58:21,723 DEBUG [RS_CLOSE_META-regionserver/08a7f35e60d4:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-19T04:58:21,878 INFO [RS:0;08a7f35e60d4:43765 {}] regionserver.HRegionServer(976): stopping server 08a7f35e60d4,43765,1731992300684; all regions closed. 
2024-11-19T04:58:21,879 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:21,879 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:21,879 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:21,879 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:21,879 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:21,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40691 is added to blk_1073741834_1010 (size=1152) 2024-11-19T04:58:21,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741834_1010 (size=1152) 2024-11-19T04:58:21,883 DEBUG [RS:0;08a7f35e60d4:43765 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/oldWALs 2024-11-19T04:58:21,883 INFO [RS:0;08a7f35e60d4:43765 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 08a7f35e60d4%2C43765%2C1731992300684.meta:.meta(num 1731992301450) 2024-11-19T04:58:21,884 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:21,884 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:21,884 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:21,884 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:21,884 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:21,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40691 is added to blk_1073741833_1009 (size=93) 2024-11-19T04:58:21,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741833_1009 (size=93) 2024-11-19T04:58:21,888 DEBUG [RS:0;08a7f35e60d4:43765 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/oldWALs 2024-11-19T04:58:21,888 INFO [RS:0;08a7f35e60d4:43765 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 08a7f35e60d4%2C43765%2C1731992300684:(num 1731992301079) 2024-11-19T04:58:21,888 DEBUG [RS:0;08a7f35e60d4:43765 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T04:58:21,888 INFO [RS:0;08a7f35e60d4:43765 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T04:58:21,888 INFO [RS:0;08a7f35e60d4:43765 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T04:58:21,888 INFO [RS:0;08a7f35e60d4:43765 {}] hbase.ChoreService(370): Chore service for: regionserver/08a7f35e60d4:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-19T04:58:21,888 INFO [RS:0;08a7f35e60d4:43765 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T04:58:21,888 INFO [regionserver/08a7f35e60d4:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-19T04:58:21,888 INFO [RS:0;08a7f35e60d4:43765 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43765 2024-11-19T04:58:21,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42741-0x1012e9716d90000, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T04:58:21,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43765-0x1012e9716d90001, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/08a7f35e60d4,43765,1731992300684 2024-11-19T04:58:21,891 INFO [RS:0;08a7f35e60d4:43765 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T04:58:21,891 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [08a7f35e60d4,43765,1731992300684] 2024-11-19T04:58:21,894 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/08a7f35e60d4,43765,1731992300684 already deleted, retry=false 2024-11-19T04:58:21,894 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 08a7f35e60d4,43765,1731992300684 expired; onlineServers=0 2024-11-19T04:58:21,894 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '08a7f35e60d4,42741,1731992300634' ***** 2024-11-19T04:58:21,894 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T04:58:21,894 INFO [M:0;08a7f35e60d4:42741 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T04:58:21,894 INFO [M:0;08a7f35e60d4:42741 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T04:58:21,894 DEBUG [M:0;08a7f35e60d4:42741 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T04:58:21,894 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-19T04:58:21,894 DEBUG [M:0;08a7f35e60d4:42741 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T04:58:21,894 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.small.0-1731992300851 {}] cleaner.HFileCleaner(306): Exit Thread[master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.small.0-1731992300851,5,FailOnTimeoutGroup] 2024-11-19T04:58:21,894 DEBUG [master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.large.0-1731992300850 {}] cleaner.HFileCleaner(306): Exit Thread[master/08a7f35e60d4:0:becomeActiveMaster-HFileCleaner.large.0-1731992300850,5,FailOnTimeoutGroup] 2024-11-19T04:58:21,894 INFO [M:0;08a7f35e60d4:42741 {}] hbase.ChoreService(370): Chore service for: master/08a7f35e60d4:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T04:58:21,895 INFO [M:0;08a7f35e60d4:42741 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T04:58:21,895 DEBUG [M:0;08a7f35e60d4:42741 {}] master.HMaster(1795): Stopping service threads 2024-11-19T04:58:21,895 INFO [M:0;08a7f35e60d4:42741 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T04:58:21,895 INFO [M:0;08a7f35e60d4:42741 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T04:58:21,895 INFO [M:0;08a7f35e60d4:42741 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T04:58:21,895 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-19T04:58:21,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42741-0x1012e9716d90000, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T04:58:21,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42741-0x1012e9716d90000, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T04:58:21,896 DEBUG [M:0;08a7f35e60d4:42741 {}] zookeeper.ZKUtil(347): master:42741-0x1012e9716d90000, quorum=127.0.0.1:61050, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T04:58:21,896 WARN [M:0;08a7f35e60d4:42741 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T04:58:21,896 INFO [M:0;08a7f35e60d4:42741 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/.lastflushedseqids 2024-11-19T04:58:21,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741838_1014 (size=99) 2024-11-19T04:58:21,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40691 is added to blk_1073741838_1014 (size=99) 2024-11-19T04:58:21,902 INFO [M:0;08a7f35e60d4:42741 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T04:58:21,902 INFO [M:0;08a7f35e60d4:42741 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T04:58:21,902 DEBUG [M:0;08a7f35e60d4:42741 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T04:58:21,902 INFO [M:0;08a7f35e60d4:42741 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:58:21,902 DEBUG [M:0;08a7f35e60d4:42741 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:58:21,902 DEBUG [M:0;08a7f35e60d4:42741 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T04:58:21,902 DEBUG [M:0;08a7f35e60d4:42741 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T04:58:21,903 INFO [M:0;08a7f35e60d4:42741 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-19T04:58:21,922 DEBUG [M:0;08a7f35e60d4:42741 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/62240d6b211249b682c2faa28b031804 is 82, key is hbase:meta,,1/info:regioninfo/1731992301500/Put/seqid=0 2024-11-19T04:58:21,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40691 is added to blk_1073741839_1015 (size=5672) 2024-11-19T04:58:21,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741839_1015 (size=5672) 2024-11-19T04:58:21,928 INFO [M:0;08a7f35e60d4:42741 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/62240d6b211249b682c2faa28b031804 2024-11-19T04:58:21,947 DEBUG [M:0;08a7f35e60d4:42741 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f0282959fb7a4b5592695a8b549a1319 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731992301521/Put/seqid=0 2024-11-19T04:58:21,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741840_1016 (size=5275) 2024-11-19T04:58:21,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40691 is added to blk_1073741840_1016 (size=5275) 2024-11-19T04:58:21,953 INFO [M:0;08a7f35e60d4:42741 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f0282959fb7a4b5592695a8b549a1319 2024-11-19T04:58:21,981 DEBUG [M:0;08a7f35e60d4:42741 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3d731600e22e4af2ba805f7f3ae69f00 is 69, key is 08a7f35e60d4,43765,1731992300684/rs:state/1731992300926/Put/seqid=0 2024-11-19T04:58:21,986 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40691 is added to blk_1073741841_1017 (size=5156) 2024-11-19T04:58:21,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741841_1017 (size=5156) 2024-11-19T04:58:21,987 INFO [M:0;08a7f35e60d4:42741 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3d731600e22e4af2ba805f7f3ae69f00 2024-11-19T04:58:21,992 INFO [RS:0;08a7f35e60d4:43765 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T04:58:21,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43765-0x1012e9716d90001, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T04:58:21,993 INFO [RS:0;08a7f35e60d4:43765 {}] regionserver.HRegionServer(1031): Exiting; stopping=08a7f35e60d4,43765,1731992300684; zookeeper connection closed. 2024-11-19T04:58:21,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43765-0x1012e9716d90001, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T04:58:21,993 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6cde9bda {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6cde9bda 2024-11-19T04:58:21,993 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-19T04:58:22,013 DEBUG [M:0;08a7f35e60d4:42741 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/14a45e0dcc304097af2bc8f54b6ecdd8 is 52, key is load_balancer_on/state:d/1731992301615/Put/seqid=0 2024-11-19T04:58:22,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741842_1018 (size=5056) 2024-11-19T04:58:22,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40691 is added to blk_1073741842_1018 (size=5056) 2024-11-19T04:58:22,023 INFO [M:0;08a7f35e60d4:42741 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/14a45e0dcc304097af2bc8f54b6ecdd8 2024-11-19T04:58:22,028 DEBUG [M:0;08a7f35e60d4:42741 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/62240d6b211249b682c2faa28b031804 as hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/62240d6b211249b682c2faa28b031804 2024-11-19T04:58:22,032 INFO [M:0;08a7f35e60d4:42741 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/62240d6b211249b682c2faa28b031804, entries=8, sequenceid=29, filesize=5.5 K 2024-11-19T04:58:22,034 DEBUG [M:0;08a7f35e60d4:42741 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f0282959fb7a4b5592695a8b549a1319 as hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f0282959fb7a4b5592695a8b549a1319 2024-11-19T04:58:22,038 INFO [M:0;08a7f35e60d4:42741 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f0282959fb7a4b5592695a8b549a1319, entries=3, sequenceid=29, filesize=5.2 K 2024-11-19T04:58:22,039 DEBUG [M:0;08a7f35e60d4:42741 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3d731600e22e4af2ba805f7f3ae69f00 as hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3d731600e22e4af2ba805f7f3ae69f00 2024-11-19T04:58:22,044 INFO [M:0;08a7f35e60d4:42741 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3d731600e22e4af2ba805f7f3ae69f00, entries=1, sequenceid=29, filesize=5.0 K 2024-11-19T04:58:22,045 DEBUG [M:0;08a7f35e60d4:42741 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/14a45e0dcc304097af2bc8f54b6ecdd8 as hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/14a45e0dcc304097af2bc8f54b6ecdd8 2024-11-19T04:58:22,050 INFO [M:0;08a7f35e60d4:42741 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42211/user/jenkins/test-data/e6cc7e06-5d25-94d8-ac5f-9e2d60434754/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/14a45e0dcc304097af2bc8f54b6ecdd8, entries=1, sequenceid=29, filesize=4.9 K 2024-11-19T04:58:22,051 INFO [M:0;08a7f35e60d4:42741 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 149ms, sequenceid=29, compaction requested=false 2024-11-19T04:58:22,053 INFO [M:0;08a7f35e60d4:42741 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T04:58:22,053 DEBUG [M:0;08a7f35e60d4:42741 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731992301902Disabling compacts and flushes for region at 1731992301902Disabling writes for close at 1731992301902Obtaining lock to block concurrent updates at 1731992301903 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731992301903Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731992301903Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731992301903Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731992301904 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731992301922 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731992301922Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731992301932 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731992301947 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731992301947Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731992301958 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731992301980 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731992301980Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731992301991 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731992302013 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731992302013Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4b6bd67: reopening flushed file at 1731992302027 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7fa59647: reopening flushed file at 1731992302033 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@25f31c1: reopening flushed file at 1731992302038 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@76e7fd3f: reopening flushed file at 1731992302044 (+6 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 149ms, sequenceid=29, compaction requested=false at 1731992302051 (+7 ms)Writing region close event to WAL at 1731992302053 (+2 ms)Closed at 1731992302053 2024-11-19T04:58:22,054 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:22,054 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:22,054 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:22,054 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:22,054 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T04:58:22,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741830_1006 (size=10311) 2024-11-19T04:58:22,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40691 is added to blk_1073741830_1006 (size=10311) 2024-11-19T04:58:22,057 INFO [M:0;08a7f35e60d4:42741 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-19T04:58:22,057 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T04:58:22,057 INFO [M:0;08a7f35e60d4:42741 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42741 2024-11-19T04:58:22,058 INFO [M:0;08a7f35e60d4:42741 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T04:58:22,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42741-0x1012e9716d90000, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T04:58:22,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42741-0x1012e9716d90000, quorum=127.0.0.1:61050, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T04:58:22,160 INFO [M:0;08a7f35e60d4:42741 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T04:58:22,163 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4c4c959a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:58:22,163 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4edfb46d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T04:58:22,163 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T04:58:22,163 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@238bf9b3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T04:58:22,163 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@223a801d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/hadoop.log.dir/,STOPPED} 2024-11-19T04:58:22,165 WARN [BP-1388505216-172.17.0.2-1731992299906 heartbeating to localhost/127.0.0.1:42211 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T04:58:22,165 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T04:58:22,165 WARN [BP-1388505216-172.17.0.2-1731992299906 heartbeating to localhost/127.0.0.1:42211 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1388505216-172.17.0.2-1731992299906 (Datanode Uuid 67afe9ee-a0fb-4af0-9833-d2760ba84e1f) service to localhost/127.0.0.1:42211 2024-11-19T04:58:22,165 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T04:58:22,165 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/cluster_a163db0d-7c5f-cc41-7223-e5b47e82f60c/data/data3/current/BP-1388505216-172.17.0.2-1731992299906 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:58:22,166 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/cluster_a163db0d-7c5f-cc41-7223-e5b47e82f60c/data/data4/current/BP-1388505216-172.17.0.2-1731992299906 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:58:22,166 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T04:58:22,168 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@78047c32{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T04:58:22,168 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1d359c98{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T04:58:22,168 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T04:58:22,168 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24bb5ef7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T04:58:22,168 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f387f47{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/hadoop.log.dir/,STOPPED} 2024-11-19T04:58:22,169 WARN [BP-1388505216-172.17.0.2-1731992299906 heartbeating to localhost/127.0.0.1:42211 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T04:58:22,169 WARN [BP-1388505216-172.17.0.2-1731992299906 heartbeating to localhost/127.0.0.1:42211 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1388505216-172.17.0.2-1731992299906 (Datanode Uuid 7d07476d-291a-4ffd-8156-d95e99c109c5) service to localhost/127.0.0.1:42211 2024-11-19T04:58:22,170 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/cluster_a163db0d-7c5f-cc41-7223-e5b47e82f60c/data/data1/current/BP-1388505216-172.17.0.2-1731992299906 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:58:22,170 WARN 
[refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/cluster_a163db0d-7c5f-cc41-7223-e5b47e82f60c/data/data2/current/BP-1388505216-172.17.0.2-1731992299906 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T04:58:22,170 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T04:58:22,171 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-19T04:58:22,171 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T04:58:22,176 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7fa6f74d{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T04:58:22,177 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@93c51d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T04:58:22,177 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T04:58:22,177 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d7a0e2e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T04:58:22,177 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c1884c9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7e687143-3ee9-5bc4-c204-693489772b88/hadoop.log.dir/,STOPPED} 2024-11-19T04:58:22,188 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T04:58:22,204 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T04:58:22,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41423/user/jenkins/test-data/80ada1f8-212d-7730-4ec8-703afabb12e7/WALs/08a7f35e60d4,42287,1731992099051/08a7f35e60d4%2C42287%2C1731992099051.1731992099260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T04:58:22,213 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=270 (was 229) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: globalEventExecutor-1-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113) 
app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42211 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42211 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for 
localhost/127.0.0.1:42211 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42211 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:42211 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42211 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42211 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42211 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=534 (was 503) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=208 (was 208), ProcessCount=11 (was 12), AvailableMemoryMB=11970 (was 11992)